Squashed 'third_party/protobuf/' content from commit e35e248

Change-Id: I6cbe123d09fe50fdcad0e51466665daeee7433c7
git-subtree-dir: third_party/protobuf
git-subtree-split: e35e24800fb8d694bdeea5fd63dc7d1b14d68723
diff --git a/src/google/protobuf/stubs/atomic_sequence_num.h b/src/google/protobuf/stubs/atomic_sequence_num.h
new file mode 100644
index 0000000..bb20942
--- /dev/null
+++ b/src/google/protobuf/stubs/atomic_sequence_num.h
@@ -0,0 +1,54 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_
+#define GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_
+
+#include <google/protobuf/stubs/atomicops.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+class SequenceNumber {
+ public:
+  SequenceNumber() : word_(0) {}
+
+  AtomicWord GetNext() {
+    return NoBarrier_AtomicIncrement(&word_, 1) - 1;
+  }
+ private:
+  AtomicWord word_;
+};
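+
+// Illustrative usage sketch: each call to GetNext() returns a distinct,
+// monotonically increasing value, so the class can hand out unique IDs
+// without a mutex.
+//
+//   SequenceNumber seq;
+//   AtomicWord a = seq.GetNext();  // 0
+//   AtomicWord b = seq.GetNext();  // 1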
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_
diff --git a/src/google/protobuf/stubs/atomicops.h b/src/google/protobuf/stubs/atomicops.h
new file mode 100644
index 0000000..cb93227
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops.h
@@ -0,0 +1,244 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The routines exported by this module are subtle.  If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain.  If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative.  You should assume only properties explicitly guaranteed by the
+// specifications in this file.  You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break.  If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines.  The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use them.
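+//
+// For example (an illustrative sketch):
+//
+//   Atomic32 flag = 0;
+//   NoBarrier_Store(&flag, 1);           // instead of:  flag = 1;
+//   Atomic32 v = NoBarrier_Load(&flag);  // instead of:  Atomic32 v = flag;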
+
+// This header and the implementations for each platform (located in
+// atomicops_internals_*) must be kept in sync with the upstream code (V8).
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_H_
+
+// The contents of this file are skipped when thread safety is not required.
+#ifndef GOOGLE_PROTOBUF_NO_THREAD_SAFETY
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/platform_macros.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+#if defined(GOOGLE_PROTOBUF_ARCH_POWER)
+#if defined(_LP64) || defined(__LP64__)
+typedef int32 Atomic32;
+typedef intptr_t Atomic64;
+#else
+typedef intptr_t Atomic32;
+typedef int64 Atomic64;
+#endif
+#else
+typedef int32 Atomic32;
+#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
+// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__) || defined(GOOGLE_PROTOBUF_OS_NACL)
+// NaCl's intptr_t is not actually 64 bits on 64-bit platforms!
+// http://code.google.com/p/nativeclient/issues/detail?id=1162
+// sparcv9's pointer type is 32 bits.
+typedef int64 Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition variables.  They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions.  "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.  A brief usage sketch follows the 32-bit declarations below.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+#if defined(__MINGW32__) && defined(MemoryBarrier)
+#undef MemoryBarrier
+#endif
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
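+
+// A minimal usage sketch (illustrative only): a spinlock built from the
+// primitives above.  Acquire_CompareAndSwap keeps the critical section's
+// accesses from being reordered before the lock is taken, and Release_Store
+// keeps them from being reordered after it is dropped.
+//
+//   Atomic32 lock = 0;
+//   void Lock()   { while (Acquire_CompareAndSwap(&lock, 0, 1) != 0) {} }
+//   void Unlock() { Release_Store(&lock, 0); }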
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // GOOGLE_PROTOBUF_ARCH_64_BIT
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+// Include our platform-specific implementation.
+#define GOOGLE_PROTOBUF_ATOMICOPS_ERROR \
+"Atomic operations are not supported on your platform"
+
+// ThreadSanitizer, http://clang.llvm.org/docs/ThreadSanitizer.html.
+#if defined(THREAD_SANITIZER)
+#include <google/protobuf/stubs/atomicops_internals_tsan.h>
+// MSVC.
+#elif defined(_MSC_VER)
+#if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)
+#include <google/protobuf/stubs/atomicops_internals_x86_msvc.h>
+#else
+#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+#endif
+
+// Solaris
+#elif defined(GOOGLE_PROTOBUF_OS_SOLARIS)
+#include <google/protobuf/stubs/atomicops_internals_solaris.h>
+
+// AIX
+#elif defined(GOOGLE_PROTOBUF_OS_AIX)
+#include <google/protobuf/stubs/atomicops_internals_power.h>
+
+// Apple.
+#elif defined(GOOGLE_PROTOBUF_OS_APPLE)
+#include <google/protobuf/stubs/atomicops_internals_macosx.h>
+
+// GCC.
+#elif defined(__GNUC__)
+#if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)
+#include <google/protobuf/stubs/atomicops_internals_x86_gcc.h>
+#elif defined(GOOGLE_PROTOBUF_ARCH_ARM) && defined(__linux__)
+#include <google/protobuf/stubs/atomicops_internals_arm_gcc.h>
+#elif defined(GOOGLE_PROTOBUF_ARCH_AARCH64)
+#include <google/protobuf/stubs/atomicops_internals_arm64_gcc.h>
+#elif defined(GOOGLE_PROTOBUF_ARCH_ARM_QNX)
+#include <google/protobuf/stubs/atomicops_internals_arm_qnx.h>
+#elif defined(GOOGLE_PROTOBUF_ARCH_MIPS) || defined(GOOGLE_PROTOBUF_ARCH_MIPS64)
+#include <google/protobuf/stubs/atomicops_internals_mips_gcc.h>
+#elif defined(GOOGLE_PROTOBUF_ARCH_POWER)
+#include <google/protobuf/stubs/atomicops_internals_power.h>
+#elif defined(__native_client__)
+#include <google/protobuf/stubs/atomicops_internals_pnacl.h>
+#elif (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))
+#include <google/protobuf/stubs/atomicops_internals_generic_gcc.h>
+#elif defined(__clang__)
+#if __has_extension(c_atomic)
+#include <google/protobuf/stubs/atomicops_internals_generic_gcc.h>
+#else
+#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+#endif
+#else
+#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+#endif
+
+// Unknown.
+#else
+#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+#endif
+
+// On some platforms we need additional declarations to make AtomicWord
+// compatible with our other Atomic* types.
+#if defined(GOOGLE_PROTOBUF_OS_APPLE)
+#include <google/protobuf/stubs/atomicops_internals_atomicword_compat.h>
+#endif
+
+#undef GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+
+#endif  // GOOGLE_PROTOBUF_NO_THREAD_SAFETY
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h b/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h
new file mode 100644
index 0000000..0a2d2b8
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,325 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
+}
+
+// The NoBarrier versions of the operations include "memory" in the clobber
+// list.  This is not required when they are used directly, but it is required
+// for correctness when they are used as part of the Acquire or Release
+// versions, to ensure that nothing from outside the call is reordered between
+// the operation and the memory barrier.  This does not change the generated
+// code, so it has little or no impact on the NoBarrier operations.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"IJr" (old_value),
+      [new_value]"r" (new_value)
+    : "cc", "memory"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                       \n\t"
+    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]\n\t"
+    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"IJr" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %w[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %w[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"IJr" (old_value),
+      [new_value]"r" (new_value)
+    : "cc", "memory"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[result], %[ptr]                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                     \n\t"
+    "ldxr %[result], %[ptr]                 \n\t"
+    "add %[result], %[result], %[increment] \n\t"
+    "stxr %w[temp], %[result], %[ptr]       \n\t"
+    "cbnz %w[temp], 0b                      \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"IJr" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %x[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %x[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_arm_gcc.h b/src/google/protobuf/stubs/atomicops_internals_arm_gcc.h
new file mode 100644
index 0000000..90e727b
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_arm_gcc.h
@@ -0,0 +1,151 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
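+// As used below, the helper returns zero when the exchange succeeds (i.e. when
+// *ptr matched old_value and was updated) and non-zero otherwise, which is why
+// the callers treat a zero return as success.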
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+                                           Atomic32 new_value,
+                                           volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value = *ptr;
+  do {
+    if (!pLinuxKernelCmpxchg(old_value, new_value,
+                             const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (pLinuxKernelCmpxchg(old_value, new_value,
+                               const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomically exchange the old value for an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (pLinuxKernelCmpxchg(old_value, new_value,
+                            const_cast<Atomic32*>(ptr)) == 0) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  pLinuxKernelMemoryBarrier();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_arm_qnx.h b/src/google/protobuf/stubs/atomicops_internals_arm_qnx.h
new file mode 100644
index 0000000..17dfaa5
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_arm_qnx.h
@@ -0,0 +1,146 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_
+
+// For _smp_cmpxchg()
+#include <pthread.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 QNXCmpxchg(Atomic32 old_value,
+                           Atomic32 new_value,
+                           volatile Atomic32* ptr) {
+  return static_cast<Atomic32>(
+      _smp_cmpxchg((volatile unsigned *)ptr,
+                   (unsigned)old_value,
+                   (unsigned)new_value));
+}
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value = *ptr;
+  do {
+    if (!QNXCmpxchg(old_value, new_value,
+                    const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (QNXCmpxchg(old_value, new_value,
+                      const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomically exchange the old value for an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (QNXCmpxchg(old_value, new_value,
+                   const_cast<Atomic32*>(ptr)) == 0) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __sync_synchronize();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_atomicword_compat.h b/src/google/protobuf/stubs/atomicops_internals_atomicword_compat.h
new file mode 100644
index 0000000..eb198ff
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,122 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
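+//
+// For example (an illustrative sketch), on an LP32 target where intptr_t is
+// long:
+//
+//   AtomicWord counter = 0;
+//   NoBarrier_AtomicIncrement(&counter, 1);
+//
+// would not match the Atomic32 overloads, because a long* does not convert to
+// an int32* (an int*); the wrappers below forward the call with an explicit
+// cast.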
+
+#if !defined(GOOGLE_PROTOBUF_ARCH_64_BIT)
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return Acquire_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return Release_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  NoBarrier_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return Acquire_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return Release_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
+  return NoBarrier_Load(reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return Acquire_Load(reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return Release_Load(reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+}   // namespace internal
+}   // namespace protobuf
+}   // namespace google
+
+#endif  // !defined(GOOGLE_PROTOBUF_ARCH_64_BIT)
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
new file mode 100644
index 0000000..a0116a6
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
@@ -0,0 +1,137 @@
+// Copyright 2013 Red Hat Inc.  All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Red Hat Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
+                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+  return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
+                              __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void MemoryBarrier() {
+  __sync_synchronize();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
+}
+
+#ifdef __LP64__
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
+                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+  return old_value;
+}
+
+#endif // defined(__LP64__)
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_macosx.h b/src/google/protobuf/stubs/atomicops_internals_macosx.h
new file mode 100644
index 0000000..7963324
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_macosx.h
@@ -0,0 +1,225 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32(old_value, new_value,
+                                 const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+                                     const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+  OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+                                        const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64(old_value, new_value,
+                                 reinterpret_cast<volatile int64_t*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+                                     reinterpret_cast<volatile int64_t*>(ptr)));
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return OSAtomicAdd64Barrier(increment,
+                              reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64Barrier(
+        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  // The libkern interface does not distinguish between Acquire and Release
+  // memory barriers; they are equivalent.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif  // defined(__LP64__)
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_mips_gcc.h b/src/google/protobuf/stubs/atomicops_internals_mips_gcc.h
new file mode 100644
index 0000000..f5837c9
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_mips_gcc.h
@@ -0,0 +1,313 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "r" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
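+
+// Illustrative usage (sketch): claim a flag only if it is still clear, using
+// the returned value to detect a lost race.
+//
+//   Atomic32 flag = 0;
+//   // Returns the value observed in flag; 0 means this thread won the race.
+//   Atomic32 observed = NoBarrier_CompareAndSwap(&flag, 0, 1);
+//   bool won = (observed == 0);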
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %1, %4\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %4\n"  // temp = *ptr
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
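+
+// Illustrative usage (sketch): since the increment returns the *new* value,
+// a pre-increment ticket can be recovered by subtracting the increment again.
+//
+//   Atomic32 counter = 0;
+//   // Yields 0, 1, 2, ... across threads.
+//   Atomic32 ticket = NoBarrier_AtomicIncrement(&counter, 1) - 1;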
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  ATOMICOPS_COMPILER_BARRIER();
+  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
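+
+// Illustrative usage (sketch) of the acquire/release pairing described above:
+// a producer publishes data with Release_Store and a consumer observes it with
+// Acquire_Load, so a consumer that sees the flag also sees the payload.
+//
+//   Atomic32 ready = 0;
+//   int payload = 0;                   // ordinary, non-atomic data
+//
+//   // Producer:
+//   payload = 42;
+//   Release_Store(&ready, 1);          // barrier, then store the flag
+//
+//   // Consumer:
+//   if (Acquire_Load(&ready) == 1) {   // load the flag, then barrier
+//     int value = payload;             // sees 42
+//     (void)value;
+//   }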
+
+#if defined(__LP64__)
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "r" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %1, %4\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %4\n"  // temp = *ptr
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+#endif
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_pnacl.h b/src/google/protobuf/stubs/atomicops_internals_pnacl.h
new file mode 100644
index 0000000..3b314fd
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_pnacl.h
@@ -0,0 +1,231 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PNACL_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PNACL_H_
+
+#include <atomic>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h. This requires casting memory locations to the atomic types, and
+// assumes that the API and the C++11 implementation are layout-compatible,
+// which isn't true for all implementations or hardware platforms. The static
+// assertion should detect this issue, were it to fire then this header
+// shouldn't be used.
+//
+// TODO(jfb) If this header manages to stay committed then the API should be
+//           modified, and all call sites updated.
+typedef volatile std::atomic<Atomic32>* AtomicLocation32;
+static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
+              "incompatible 32-bit atomic layout");
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+  // not defined, leading to the linker complaining about undefined references.
+  __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+  std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_relaxed,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return ((AtomicLocation32)ptr)
+      ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return increment +
+         ((AtomicLocation32)ptr)
+             ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_acquire,
+                                std::memory_order_acquire);
+  return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_release,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+#if defined(GOOGLE_PROTOBUF_ARCH_64_BIT)
+
+typedef volatile std::atomic<Atomic64>* AtomicLocation64;
+static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
+              "incompatible 64-bit atomic layout");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_relaxed,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return ((AtomicLocation64)ptr)
+      ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return increment +
+         ((AtomicLocation64)ptr)
+             ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_acquire,
+                                std::memory_order_acquire);
+  return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_release,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+#endif  // defined(GOOGLE_PROTOBUF_ARCH_64_BIT)
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PNACL_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_power.h b/src/google/protobuf/stubs/atomicops_internals_power.h
new file mode 100644
index 0000000..b8a42f2
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_power.h
@@ -0,0 +1,440 @@
+// Copyright 2014 Bloomberg Finance LP. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Bloomberg Finance LP. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+
+  asm volatile (
+      "1:     lwarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       cmpw %[cmp], %[res]             \n\t"  // compare values
+      "       bne- 2f                         \n\t"
+      "       stwcx. %[val], %[zero], %[obj]  \n\t"  // store new value
+      "       bne- 1b                         \n\t"
+      "2:                                     \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [cmp]  "b"   (old_value),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+
+  asm volatile (
+      "1:     lwarx %[res], %[zero], %[obj]       \n\t"
+      "       stwcx. %[val], %[zero], %[obj]      \n\t"
+      "       bne- 1b                             \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+
+  asm volatile (
+      "1:     lwarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       add %[res], %[val], %[res]      \n\t"  // add the operand
+      "       stwcx. %[res], %[zero], %[obj]  \n\t"  // store old value
+                                                     // if still reserved
+      "       bne- 1b                         \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [val]  "b"   (increment),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline void MemoryBarrier(void) {
+  asm volatile (
+      "       lwsync                          \n\t"
+      "       isync                           \n\t"
+              :
+              :
+              : "memory");
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 result;
+
+  asm volatile (
+      "       lwsync                          \n\t"
+
+      "1:     lwarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       add %[res], %[val], %[res]      \n\t"  // add the operand
+      "       stwcx. %[res], %[zero], %[obj]  \n\t"  // store old value
+                                                     // if still reserved
+      "       bne- 1b                         \n\t"
+      "       isync                           \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [val]  "b"   (increment),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 result;
+
+  asm volatile (
+      "1:     lwarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       cmpw %[cmp], %[res]             \n\t"  // compare values
+      "       bne- 2f                         \n\t"
+      "       stwcx. %[val], %[zero], %[obj]  \n\t"  // store new value
+      "       bne- 1b                         \n\t"
+
+      "       isync                           \n\t"
+      "2:                                     \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [cmp]  "b"   (old_value),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 result;
+
+  asm volatile (
+      "       lwsync                          \n\t"
+
+      "1:     lwarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       cmpw %[cmp], %[res]             \n\t"  // compare values
+      "       bne- 2f                         \n\t"
+      "       stwcx. %[val], %[zero], %[obj]  \n\t"  // store new value
+      "       bne- 1b                         \n\t"
+
+      "2:                                     \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [cmp]  "b"   (old_value),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
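+
+// Note on the barrier placement above: the acquire variant executes isync only
+// on the successful store-conditional path, so later accesses cannot begin
+// until the swap has completed, while the release variant executes lwsync
+// before the loop so that earlier accesses are visible before the swap; this
+// is the usual PowerPC acquire/release compare-and-swap idiom.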
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  asm volatile (
+      "       stw %[val], %[obj]      \n\t"
+      "       isync                   \n\t"
+              : [obj] "=m" (*ptr)
+              : [val]  "b"  (value));
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  asm volatile (
+      "       lwsync                  \n\t"
+      "       stw %[val], %[obj]      \n\t"
+              : [obj] "=m" (*ptr)
+              : [val]  "b"  (value));
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 result;
+
+  asm volatile (
+      "1:     lwz %[res], %[obj]              \n\t"
+      "       cmpw %[res], %[res]             \n\t" // create data
+                                                    // dependency for
+                                                    // load/load ordering
+      "       bne- 1b                         \n\t" // never taken
+
+      "       isync                           \n\t"
+              : [res]  "=b" (result)
+              : [obj]  "m"  (*ptr),
+                [zero] "i"  (0)
+              : "cr0", "ctr");
+
+  return result;
+}
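+
+// Note on the sequence above: comparing the loaded value with itself and
+// branching on the never-taken result makes the following isync wait for the
+// load to complete, so later accesses cannot appear to happen before it; this
+// is the usual PowerPC load-acquire idiom.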
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  Atomic32 result;
+
+  asm volatile (
+      "       lwsync                          \n\t"
+
+      "1:     lwz %[res], %[obj]              \n\t"
+      "       cmpw %[res], %[res]             \n\t" // create data
+                                                    // dependency for
+                                                    // load/load ordering
+      "       bne- 1b                         \n\t" // never taken
+              : [res]  "=b" (result)
+              : [obj]  "m"  (*ptr),
+                [zero] "i"  (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+
+  asm volatile (
+      "1:     ldarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       cmpd %[cmp], %[res]             \n\t"  // compare values
+      "       bne- 2f                         \n\t"
+
+      "       stdcx. %[val], %[zero], %[obj]  \n\t"  // store the new value
+      "       bne- 1b                         \n\t"
+      "2:                                     \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [cmp]  "b"   (old_value),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+
+  asm volatile (
+      "1:     ldarx %[res], %[zero], %[obj]       \n\t"
+      "       stdcx. %[val], %[zero], %[obj]      \n\t"
+      "       bne- 1b                             \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+
+  asm volatile (
+      "1:     ldarx %[res], %[zero], %[obj]   \n\t" // load and reserve
+      "       add %[res], %[res], %[val]      \n\t" // add the operand
+      "       stdcx. %[res], %[zero], %[obj]  \n\t" // store old value if
+                                                    // still reserved
+
+      "       bne- 1b                         \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [val]  "b"   (increment),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 result;
+
+  asm volatile (
+      "       lwsync                          \n\t"
+
+      "1:     ldarx %[res], %[zero], %[obj]   \n\t" // load and reserve
+      "       add %[res], %[res], %[val]      \n\t" // add the operand
+      "       stdcx. %[res], %[zero], %[obj]  \n\t" // store old value if
+                                                    // still reserved
+
+      "       bne- 1b                         \n\t"
+
+      "       isync                           \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [val]  "b"   (increment),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 result;
+
+  asm volatile (
+      "1:     ldarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       cmpd %[cmp], %[res]             \n\t"  // compare values
+      "       bne- 2f                         \n\t"
+
+      "       stdcx. %[val], %[zero], %[obj]  \n\t"  // store the new value
+      "       bne- 1b                         \n\t"
+      "       isync                           \n\t"
+      "2:                                     \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [cmp]  "b"   (old_value),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 result;
+
+  asm volatile (
+      "       lwsync                          \n\t"
+
+      "1:     ldarx %[res], %[zero], %[obj]   \n\t"  // load and reserve
+      "       cmpd %[cmp], %[res]             \n\t"  // compare values
+      "       bne- 2f                         \n\t"
+
+      "       stdcx. %[val], %[zero], %[obj]  \n\t"  // store the new value
+      "       bne- 1b                         \n\t"
+      "2:                                     \n\t"
+              : [res]  "=&b" (result)
+              : [obj]  "b"   (ptr),
+                [cmp]  "b"   (old_value),
+                [val]  "b"   (new_value),
+                [zero] "i"   (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  asm volatile (
+      "       std %[val], %[obj]          \n\t"
+      "       isync                       \n\t"
+              : [obj] "=m" (*ptr)
+              : [val] "b"  (value));
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  asm volatile (
+      "       lwsync                      \n\t"
+      "       std %[val], %[obj]          \n\t"
+              : [obj] "=m" (*ptr)
+              : [val] "b"  (value));
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 result;
+
+  asm volatile (
+      "1:     ld %[res], %[obj]                   \n\t"
+      "       cmpd %[res], %[res]                 \n\t" // create data
+                                                        // dependency for
+                                                        // load/load ordering
+      "       bne- 1b                             \n\t" // never taken
+
+      "       isync                               \n\t"
+              : [res]  "=b" (result)
+              : [obj]  "m"  (*ptr),
+                [zero] "i"  (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  Atomic64 result;
+
+  asm volatile (
+      "       lwsync                              \n\t"
+
+      "1:     ld %[res], %[obj]                   \n\t"
+      "       cmpd %[res], %[res]                 \n\t" // create data
+                                                        // dependency for
+                                                        // load/load ordering
+      "       bne- 1b                             \n\t" // never taken
+              : [res]  "=b" (result)
+              : [obj]  "m"  (*ptr),
+                [zero] "i"  (0)
+              : "cr0", "ctr");
+
+  return result;
+}
+#endif
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_solaris.h b/src/google/protobuf/stubs/atomicops_internals_solaris.h
new file mode 100644
index 0000000..d8057ec
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_solaris.h
@@ -0,0 +1,188 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
+
+#include <atomic.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  return (Atomic32)atomic_cas_32((volatile uint32_t*)ptr, (uint32_t)old_value,
+                                 (uint32_t)new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return (Atomic32)atomic_swap_32((volatile uint32_t*)ptr, (uint32_t)new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return (Atomic32)atomic_add_32_nv((volatile uint32_t*)ptr,
+                                    (uint32_t)increment);
+}
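+
+// Note: the atomic(3C) *_nv ("new value") routines return the updated value,
+// which matches this API's contract of returning the incremented result.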
+
+inline void MemoryBarrier(void) {
+  membar_producer();
+  membar_consumer();
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 ret = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return ret;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return ret;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  membar_producer();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  membar_consumer();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 val = *ptr;
+  membar_consumer();
+  return val;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  membar_producer();
+  return *ptr;
+}
+
+#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  return atomic_cas_64((volatile uint64_t*)ptr, (uint64_t)old_value,
+                       (uint64_t)new_value);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return atomic_swap_64((volatile uint64_t*)ptr, (uint64_t)new_value);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return atomic_add_64_nv((volatile uint64_t*)ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 ret = atomic_add_64_nv((volatile uint64_t*)ptr, increment);
+  MemoryBarrier();
+  return ret;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return ret;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  membar_producer();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  membar_consumer();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 ret = *ptr;
+  membar_consumer();
+  return ret;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  membar_producer();
+  return *ptr;
+}
+#endif
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
+
diff --git a/src/google/protobuf/stubs/atomicops_internals_tsan.h b/src/google/protobuf/stubs/atomicops_internals_tsan.h
new file mode 100644
index 0000000..0c90354
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_tsan.h
@@ -0,0 +1,219 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2013 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer (http://clang.llvm.org/docs/ThreadSanitizer.html).
+// Use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+#include <sanitizer/tsan_interface_atomic.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
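+
+// Note: __tsan_atomic32_compare_exchange_strong follows the C11/C++11
+// compare-exchange contract and updates cmp with the value actually observed
+// on failure, so returning cmp yields the previous value of *ptr whether the
+// swap succeeded or not.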
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+                                         Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+                                       Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+                                       Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                          Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                        Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+                                         Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+                                       Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+                                       Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                          Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                        Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void MemoryBarrier() {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc b/src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc
new file mode 100644
index 0000000..53c9eae
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc
@@ -0,0 +1,137 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <cstring>
+
+#include <google/protobuf/stubs/atomicops.h>
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file.  If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
+// of the global offset table.  To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%ebx, %%edi\n"     \
+      "cpuid\n"                \
+      "xchg %%edi, %%ebx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%rbx, %%rdi\n"     \
+      "cpuid\n"                \
+      "xchg %%rdi, %%rbx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid)        // initialize the struct only on x86
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Set the flags so that code will run correctly and conservatively: even if
+// we haven't been initialized yet, we're probably still single-threaded, so
+// these default values should be safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+  false,          // bug can't exist before process spawns multiple threads
+  false,          // no SSE2
+};
+
+namespace {
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+void AtomicOps_Internalx86CPUFeaturesInit() {
+  uint32_t eax;
+  uint32_t ebx;
+  uint32_t ecx;
+  uint32_t edx;
+
+  // Get vendor string (issue CPUID with eax = 0)
+  cpuid(eax, ebx, ecx, edx, 0);
+  char vendor[13];
+  memcpy(vendor, &ebx, 4);
+  memcpy(vendor + 4, &edx, 4);
+  memcpy(vendor + 8, &ecx, 4);
+  vendor[12] = 0;
+
+  // get feature flags in ecx/edx, and family/model in eax
+  cpuid(eax, ebx, ecx, edx, 1);
+
+  int family = (eax >> 8) & 0xf;        // family and model fields
+  int model = (eax >> 4) & 0xf;
+  if (family == 0xf) {                  // use extended family and model fields
+    family += (eax >> 20) & 0xff;
+    model += ((eax >> 16) & 0xf) << 4;
+  }
+
+  // Opteron Rev E has a bug in which on very rare occasions a locked
+  // instruction doesn't act as a read-acquire barrier if followed by a
+  // non-locked read-modify-write instruction.  Rev F has this bug in
+  // pre-release versions, but not in versions released to customers,
+  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
+      family == 15 &&
+      32 <= model && model <= 63) {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+  } else {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+  }
+
+  // edx bit 26 is SSE2, which we use to tell us whether we can use mfence
+  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+}
+
+class AtomicOpsx86Initializer {
+ public:
+  AtomicOpsx86Initializer() {
+    AtomicOps_Internalx86CPUFeaturesInit();
+  }
+};
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+}  // namespace
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // defined(cpuid)
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h b/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
new file mode 100644
index 0000000..edccc59
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
@@ -0,0 +1,293 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86 CPU.  Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+  bool has_sse2;             // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit implementations of memory barrier can be simpler, because
+// "mfence" is guaranteed to exist.
+inline void MemoryBarrier() {
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {  // mfence is faster but not present on PIII
+    Atomic32 x = 0;
+    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
+  }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    *ptr = value;
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {
+    NoBarrier_AtomicExchange(ptr, value);
+                          // acts as a barrier on PIII
+  }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  *ptr = value;  // An x86 store acts as a release barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
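+// Usage sketch (illustrative only; g_payload and g_ready are placeholder
+// globals, not part of this header).  Release_Store() and Acquire_Load() can
+// be paired to publish data from one thread to another:
+//
+//   int g_payload;
+//   Atomic32 g_ready = 0;
+//
+//   // Producer thread:
+//   g_payload = 42;
+//   Release_Store(&g_ready, 1);   // compiler barrier, then plain store
+//
+//   // Consumer thread:
+//   if (Acquire_Load(&g_ready) == 1) {
+//     int v = g_payload;          // sees 42, given x86 load/store ordering
+//   }
+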
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+
+  *ptr = value;  // An x86 store acts as a release barrier
+                 // for current AMD/Intel chips as of Jan 2008.
+                 // See also Acquire_Load(), below.
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+  //
+  // x86 stores/loads fail to act as barriers for a few instructions (clflush
+  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+  // not generated by the compiler, and are rare.  Users of these instructions
+  // need to know about cache behaviour in any case since all of these involve
+  // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
+                          // for current AMD/Intel chips as of Jan 2008.
+                          // See also Release_Store(), above.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__)
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/google/protobuf/stubs/atomicops_internals_x86_msvc.cc b/src/google/protobuf/stubs/atomicops_internals_x86_msvc.cc
new file mode 100644
index 0000000..741b164
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_x86_msvc.cc
@@ -0,0 +1,112 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The compilation of extension_set.cc fails when windows.h is included.
+// Therefore we move the code depending on windows.h to this separate cc file.
+
+// Don't compile this file for people not concerned about thread safety.
+#ifndef GOOGLE_PROTOBUF_NO_THREAD_SAFETY
+
+#include <google/protobuf/stubs/atomicops.h>
+
+#ifdef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline void MemoryBarrier() {
+  // We use MemoryBarrier from WinNT.h
+  ::MemoryBarrier();
+}
+
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value) {
+  LONG result = InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                  Atomic32 new_value) {
+  LONG result = InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment) {
+  return InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value) {
+  PVOID result = InterlockedCompareExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                  Atomic64 new_value) {
+  PVOID result = InterlockedExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                 Atomic64 increment) {
+  return InterlockedExchangeAdd64(
+      reinterpret_cast<volatile LONGLONG*>(ptr),
+      static_cast<LONGLONG>(increment)) + increment;
+}
+
+#endif  // defined(_WIN64)
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#endif  // GOOGLE_PROTOBUF_NO_THREAD_SAFETY
diff --git a/src/google/protobuf/stubs/atomicops_internals_x86_msvc.h b/src/google/protobuf/stubs/atomicops_internals_x86_msvc.h
new file mode 100644
index 0000000..e53a641
--- /dev/null
+++ b/src/google/protobuf/stubs/atomicops_internals_x86_msvc.h
@@ -0,0 +1,150 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(_WIN64)
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/src/google/protobuf/stubs/bytestream.cc b/src/google/protobuf/stubs/bytestream.cc
new file mode 100644
index 0000000..f4af6a5
--- /dev/null
+++ b/src/google/protobuf/stubs/bytestream.cc
@@ -0,0 +1,196 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/bytestream.h>
+
+#include <string.h>
+#include <algorithm>
+
+namespace google {
+namespace protobuf {
+namespace strings {
+
+void ByteSource::CopyTo(ByteSink* sink, size_t n) {
+  while (n > 0) {
+    StringPiece fragment = Peek();
+    if (fragment.empty()) {
+      GOOGLE_LOG(DFATAL) << "ByteSource::CopyTo() overran input.";
+      break;
+    }
+    std::size_t fragment_size = std::min<std::size_t>(n, fragment.size());
+    sink->Append(fragment.data(), fragment_size);
+    Skip(fragment_size);
+    n -= fragment_size;
+  }
+}
+
+void ByteSink::Flush() {}
+
+void UncheckedArrayByteSink::Append(const char* data, size_t n) {
+  if (data != dest_) {
+    // Catch cases where the pointer returned by GetAppendBuffer() was modified.
+    GOOGLE_DCHECK(!(dest_ <= data && data < (dest_ + n)))
+        << "Append() data[] overlaps with dest_[]";
+    memcpy(dest_, data, n);
+  }
+  dest_ += n;
+}
+
+CheckedArrayByteSink::CheckedArrayByteSink(char* outbuf, size_t capacity)
+    : outbuf_(outbuf), capacity_(capacity), size_(0), overflowed_(false) {
+}
+
+void CheckedArrayByteSink::Append(const char* bytes, size_t n) {
+  size_t available = capacity_ - size_;
+  if (n > available) {
+    n = available;
+    overflowed_ = true;
+  }
+  if (n > 0 && bytes != (outbuf_ + size_)) {
+    // Catch cases where the pointer returned by GetAppendBuffer() was modified.
+    GOOGLE_DCHECK(!(outbuf_ <= bytes && bytes < (outbuf_ + capacity_)))
+        << "Append() bytes[] overlaps with outbuf_[]";
+    memcpy(outbuf_ + size_, bytes, n);
+  }
+  size_ += n;
+}
+
+GrowingArrayByteSink::GrowingArrayByteSink(size_t estimated_size)
+    : capacity_(estimated_size),
+      buf_(new char[estimated_size]),
+      size_(0) {
+}
+
+GrowingArrayByteSink::~GrowingArrayByteSink() {
+  delete[] buf_;  // Just in case the user didn't call GetBuffer.
+}
+
+void GrowingArrayByteSink::Append(const char* bytes, size_t n) {
+  size_t available = capacity_ - size_;
+  if (bytes != (buf_ + size_)) {
+    // Catch cases where the pointer returned by GetAppendBuffer() was modified.
+    // We need to test for this before calling Expand() which may reallocate.
+    GOOGLE_DCHECK(!(buf_ <= bytes && bytes < (buf_ + capacity_)))
+        << "Append() bytes[] overlaps with buf_[]";
+  }
+  if (n > available) {
+    Expand(n - available);
+  }
+  if (n > 0 && bytes != (buf_ + size_)) {
+    memcpy(buf_ + size_, bytes, n);
+  }
+  size_ += n;
+}
+
+char* GrowingArrayByteSink::GetBuffer(size_t* nbytes) {
+  ShrinkToFit();
+  char* b = buf_;
+  *nbytes = size_;
+  buf_ = NULL;
+  size_ = capacity_ = 0;
+  return b;
+}
+
+void GrowingArrayByteSink::Expand(size_t amount) {  // Expand by at least 50%.
+  size_t new_capacity = std::max(capacity_ + amount, (3 * capacity_) / 2);
+  char* bigger = new char[new_capacity];
+  memcpy(bigger, buf_, size_);
+  delete[] buf_;
+  buf_ = bigger;
+  capacity_ = new_capacity;
+}
+
+void GrowingArrayByteSink::ShrinkToFit() {
+  // Shrink only if the buffer is large and size_ is less than 3/4
+  // of capacity_.
+  if (capacity_ > 256 && size_ < (3 * capacity_) / 4) {
+    char* just_enough = new char[size_];
+    memcpy(just_enough, buf_, size_);
+    delete[] buf_;
+    buf_ = just_enough;
+    capacity_ = size_;
+  }
+}
+
+void StringByteSink::Append(const char* data, size_t n) {
+  dest_->append(data, n);
+}
+
+size_t ArrayByteSource::Available() const {
+  return input_.size();
+}
+
+StringPiece ArrayByteSource::Peek() {
+  return input_;
+}
+
+void ArrayByteSource::Skip(size_t n) {
+  GOOGLE_DCHECK_LE(n, input_.size());
+  input_.remove_prefix(n);
+}
+
+LimitByteSource::LimitByteSource(ByteSource *source, size_t limit)
+  : source_(source),
+    limit_(limit) {
+}
+
+size_t LimitByteSource::Available() const {
+  size_t available = source_->Available();
+  if (available > limit_) {
+    available = limit_;
+  }
+
+  return available;
+}
+
+StringPiece LimitByteSource::Peek() {
+  StringPiece piece(source_->Peek());
+  if (piece.size() > limit_) {
+    piece.set(piece.data(), limit_);
+  }
+
+  return piece;
+}
+
+void LimitByteSource::Skip(size_t n) {
+  GOOGLE_DCHECK_LE(n, limit_);
+  source_->Skip(n);
+  limit_ -= n;
+}
+
+void LimitByteSource::CopyTo(ByteSink *sink, size_t n) {
+  GOOGLE_DCHECK_LE(n, limit_);
+  source_->CopyTo(sink, n);
+  limit_ -= n;
+}
+
+}  // namespace strings
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/bytestream.h b/src/google/protobuf/stubs/bytestream.h
new file mode 100644
index 0000000..de8e020
--- /dev/null
+++ b/src/google/protobuf/stubs/bytestream.h
@@ -0,0 +1,348 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file declares the ByteSink and ByteSource abstract interfaces. These
+// interfaces represent objects that consume (ByteSink) or produce (ByteSource)
+// a sequence of bytes. Using these abstract interfaces in your APIs can help
+// make your code work with a variety of input and output types.
+//
+// This file also declares the following commonly used implementations of these
+// interfaces.
+//
+//   ByteSink:
+//      UncheckedArrayByteSink  Writes to an array, without bounds checking
+//      CheckedArrayByteSink    Writes to an array, with bounds checking
+//      GrowingArrayByteSink    Allocates and writes to a growable buffer
+//      StringByteSink          Writes to an STL string
+//      NullByteSink            Consumes a never-ending stream of bytes
+//
+//   ByteSource:
+//      ArrayByteSource         Reads from an array or string/StringPiece
+//      LimitByteSource         Limits the number of bytes read from an
+//                              underlying ByteSource
+
+#ifndef GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
+#define GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
+
+#include <stddef.h>
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/stringpiece.h>
+
+class CordByteSink;
+class MemBlock;
+
+namespace google {
+namespace protobuf {
+namespace strings {
+
+// An abstract interface for an object that consumes a sequence of bytes. This
+// interface offers 3 different ways to append data, and a Flush() function.
+//
+// Example:
+//
+//   string my_data;
+//   ...
+//   ByteSink* sink = ...
+//   sink->Append(my_data.data(), my_data.size());
+//   sink->Flush();
+//
+class LIBPROTOBUF_EXPORT ByteSink {
+ public:
+  ByteSink() {}
+  virtual ~ByteSink() {}
+
+  // Appends the "n" bytes starting at "bytes".
+  virtual void Append(const char* bytes, size_t n) = 0;
+
+  // Flushes internal buffers. The default implementation does nothing. ByteSink
+  // subclasses may use internal buffers that require calling Flush() at the end
+  // of the stream.
+  virtual void Flush();
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSink);
+};
+
+// An abstract interface for an object that produces a fixed-size sequence of
+// bytes.
+//
+// Example:
+//
+//   ByteSource* source = ...
+//   while (source->Available() > 0) {
+//     StringPiece data = source->Peek();
+//     ... do something with "data" ...
+//     source->Skip(data.length());
+//   }
+//
+class LIBPROTOBUF_EXPORT ByteSource {
+ public:
+  ByteSource() {}
+  virtual ~ByteSource() {}
+
+  // Returns the number of bytes left to read from the source. Available()
+  // should decrease by N each time Skip(N) is called. Available() may not
+  // increase. Available() returning 0 indicates that the ByteSource is
+  // exhausted.
+  //
+  // Note: Size() may have been a more appropriate name as it's more
+  //       indicative of the fixed-size nature of a ByteSource.
+  virtual size_t Available() const = 0;
+
+  // Returns a StringPiece of the next contiguous region of the source. Does not
+  // reposition the source. The returned region is empty iff Available() == 0.
+  //
+  // The returned region is valid until the next call to Skip() or until this
+  // object is destroyed, whichever occurs first.
+  //
+  // The length of the returned StringPiece will be <= Available().
+  virtual StringPiece Peek() = 0;
+
+  // Skips the next n bytes. Invalidates any StringPiece returned by a previous
+  // call to Peek().
+  //
+  // REQUIRES: Available() >= n
+  virtual void Skip(size_t n) = 0;
+
+  // Writes the next n bytes in this ByteSource to the given ByteSink, and
+  // advances this ByteSource past the copied bytes. The default implementation
+  // of this method just copies the bytes normally, but subclasses might
+  // override CopyTo to optimize certain cases.
+  //
+  // REQUIRES: Available() >= n
+  virtual void CopyTo(ByteSink* sink, size_t n);
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSource);
+};
+
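+// Combined usage sketch (illustrative; uses the ArrayByteSource and
+// StringByteSink implementations declared below):
+//
+//   StringPiece input("some bytes");
+//   ArrayByteSource source(input);
+//   string out;
+//   StringByteSink sink(&out);
+//   source.CopyTo(&sink, source.Available());   // out == "some bytes"
+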
+//
+// Some commonly used implementations of ByteSink
+//
+
+// Implementation of ByteSink that writes to an unsized byte array. No
+// bounds-checking is performed--it is the caller's responsibility to ensure
+// that the destination array is large enough.
+//
+// Example:
+//
+//   char buf[10];
+//   UncheckedArrayByteSink sink(buf);
+//   sink.Append("hi", 2);    // OK
+//   sink.Append(data, 100);  // WOOPS! Overflows buf[10].
+//
+class LIBPROTOBUF_EXPORT UncheckedArrayByteSink : public ByteSink {
+ public:
+  explicit UncheckedArrayByteSink(char* dest) : dest_(dest) {}
+  virtual void Append(const char* data, size_t n);
+
+  // Returns the current output pointer so that a caller can see how many bytes
+  // were produced.
+  //
+  // Note: this method is not part of the ByteSink interface.
+  char* CurrentDestination() const { return dest_; }
+
+ private:
+  char* dest_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(UncheckedArrayByteSink);
+};
+
+// Implementation of ByteSink that writes to a sized byte array. This sink will
+// not write more than "capacity" bytes to outbuf. Once "capacity" bytes are
+// appended, subsequent bytes will be ignored and Overflowed() will return true.
+// Overflowed() does not cause a runtime error (i.e., it does not CHECK fail).
+//
+// Example:
+//
+//   char buf[10];
+//   CheckedArrayByteSink sink(buf, 10);
+//   sink.Append("hi", 2);    // OK
+//   sink.Append(data, 100);  // Will only write 8 more bytes
+//
+class LIBPROTOBUF_EXPORT CheckedArrayByteSink : public ByteSink {
+ public:
+  CheckedArrayByteSink(char* outbuf, size_t capacity);
+  virtual void Append(const char* bytes, size_t n);
+
+  // Returns the number of bytes actually written to the sink.
+  size_t NumberOfBytesWritten() const { return size_; }
+
+  // Returns true if any bytes were discarded, i.e., if there was an
+  // attempt to write more than 'capacity' bytes.
+  bool Overflowed() const { return overflowed_; }
+
+ private:
+  char* outbuf_;
+  const size_t capacity_;
+  size_t size_;
+  bool overflowed_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CheckedArrayByteSink);
+};
+
+// Implementation of ByteSink that allocates an internal buffer (a char array)
+// and expands it as needed to accommodate appended data (similar to a string),
+// and allows the caller to take ownership of the internal buffer via the
+// GetBuffer() method. The buffer returned from GetBuffer() must be deleted by
+// the caller with delete[]. GetBuffer() also sets the internal buffer to be
+// empty, and subsequent appends to the sink will create a new buffer. The
+// destructor will free the internal buffer if GetBuffer() was not called.
+//
+// Example:
+//
+//   GrowingArrayByteSink sink(10);
+//   sink.Append("hi", 2);
+//   sink.Append(data, n);
+//   size_t nbytes;
+//   char* buf = sink.GetBuffer(&nbytes);  // Ownership transferred
+//   delete[] buf;
+//
+class LIBPROTOBUF_EXPORT GrowingArrayByteSink : public strings::ByteSink {
+ public:
+  explicit GrowingArrayByteSink(size_t estimated_size);
+  virtual ~GrowingArrayByteSink();
+  virtual void Append(const char* bytes, size_t n);
+
+  // Returns the allocated buffer, and sets nbytes to its size. The caller takes
+  // ownership of the buffer and must delete it with delete[].
+  char* GetBuffer(size_t* nbytes);
+
+ private:
+  void Expand(size_t amount);
+  void ShrinkToFit();
+
+  size_t capacity_;
+  char* buf_;
+  size_t size_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GrowingArrayByteSink);
+};
+
+// Implementation of ByteSink that appends to the given string.
+// Existing contents of "dest" are not modified; new data is appended.
+//
+// Example:
+//
+//   string dest = "Hello ";
+//   StringByteSink sink(&dest);
+//   sink.Append("World", 5);
+//   assert(dest == "Hello World");
+//
+class LIBPROTOBUF_EXPORT StringByteSink : public ByteSink {
+ public:
+  explicit StringByteSink(string* dest) : dest_(dest) {}
+  virtual void Append(const char* data, size_t n);
+
+ private:
+  string* dest_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringByteSink);
+};
+
+// Implementation of ByteSink that discards all data.
+//
+// Example:
+//
+//   NullByteSink sink;
+//   sink.Append(data, data.size());  // All data ignored.
+//
+class LIBPROTOBUF_EXPORT NullByteSink : public ByteSink {
+ public:
+  NullByteSink() {}
+  virtual void Append(const char *data, size_t n) {}
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(NullByteSink);
+};
+
+//
+// Some commonly used implementations of ByteSource
+//
+
+// Implementation of ByteSource that reads from a StringPiece.
+//
+// Example:
+//
+//   string data = "Hello";
+//   ArrayByteSource source(data);
+//   assert(source.Available() == 5);
+//   assert(source.Peek() == "Hello");
+//
+class LIBPROTOBUF_EXPORT ArrayByteSource : public ByteSource {
+ public:
+  explicit ArrayByteSource(StringPiece s) : input_(s) {}
+
+  virtual size_t Available() const;
+  virtual StringPiece Peek();
+  virtual void Skip(size_t n);
+
+ private:
+  StringPiece   input_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayByteSource);
+};
+
+// Implementation of ByteSource that wraps another ByteSource, limiting the
+// number of bytes returned.
+//
+// The caller maintains ownership of the underlying source, and may not use the
+// underlying source while using the LimitByteSource object.  The underlying
+// source's pointer is advanced by n bytes every time this LimitByteSource
+// object is advanced by n.
+//
+// Example:
+//
+//   string data = "Hello World";
+//   ArrayByteSource abs(data);
+//   assert(abs.Available() == data.size());
+//
+//   LimitByteSource limit(&abs, 5);
+//   assert(limit.Available() == 5);
+//   assert(limit.Peek() == "Hello");
+//
+class LIBPROTOBUF_EXPORT LimitByteSource : public ByteSource {
+ public:
+  // Returns at most "limit" bytes from "source".
+  LimitByteSource(ByteSource* source, size_t limit);
+
+  virtual size_t Available() const;
+  virtual StringPiece Peek();
+  virtual void Skip(size_t n);
+
+  // We override CopyTo so that we can forward to the underlying source, in
+  // case it has an efficient implementation of CopyTo.
+  virtual void CopyTo(ByteSink* sink, size_t n);
+
+ private:
+  ByteSource* source_;
+  size_t limit_;
+};
+
+}  // namespace strings
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
diff --git a/src/google/protobuf/stubs/bytestream_unittest.cc b/src/google/protobuf/stubs/bytestream_unittest.cc
new file mode 100644
index 0000000..06f114a
--- /dev/null
+++ b/src/google/protobuf/stubs/bytestream_unittest.cc
@@ -0,0 +1,146 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/bytestream.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <algorithm>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace strings {
+namespace {
+
+// We use this class instead of ArrayByteSource to simulate a ByteSource that
+// contains multiple fragments.  ArrayByteSource returns the entire array in
+// one fragment.
+class MockByteSource : public ByteSource {
+ public:
+  MockByteSource(StringPiece data, int block_size)
+    : data_(data), block_size_(block_size) {}
+
+  size_t Available() const { return data_.size(); }
+  StringPiece Peek() {
+    return data_.substr(0, block_size_);
+  }
+  void Skip(size_t n) { data_.remove_prefix(n); }
+
+ private:
+  StringPiece data_;
+  int block_size_;
+};
+
+TEST(ByteSourceTest, CopyTo) {
+  StringPiece data("Hello world!");
+  MockByteSource source(data, 3);
+  string str;
+  StringByteSink sink(&str);
+
+  source.CopyTo(&sink, data.size());
+  EXPECT_EQ(data, str);
+}
+
+TEST(ByteSourceTest, CopySubstringTo) {
+  StringPiece data("Hello world!");
+  MockByteSource source(data, 3);
+  source.Skip(1);
+  string str;
+  StringByteSink sink(&str);
+
+  source.CopyTo(&sink, data.size() - 2);
+  EXPECT_EQ(data.substr(1, data.size() - 2), str);
+  EXPECT_EQ("!", source.Peek());
+}
+
+TEST(ByteSourceTest, LimitByteSource) {
+  StringPiece data("Hello world!");
+  MockByteSource source(data, 3);
+  LimitByteSource limit_source(&source, 6);
+  EXPECT_EQ(6, limit_source.Available());
+  limit_source.Skip(1);
+  EXPECT_EQ(5, limit_source.Available());
+
+  {
+    string str;
+    StringByteSink sink(&str);
+    limit_source.CopyTo(&sink, limit_source.Available());
+    EXPECT_EQ("ello ", str);
+    EXPECT_EQ(0, limit_source.Available());
+    EXPECT_EQ(6, source.Available());
+  }
+
+  {
+    string str;
+    StringByteSink sink(&str);
+    source.CopyTo(&sink, source.Available());
+    EXPECT_EQ("world!", str);
+    EXPECT_EQ(0, source.Available());
+  }
+}
+
+TEST(ByteSourceTest, CopyToStringByteSink) {
+  StringPiece data("Hello world!");
+  MockByteSource source(data, 3);
+  string str;
+  StringByteSink sink(&str);
+  source.CopyTo(&sink, data.size());
+  EXPECT_EQ(data, str);
+}
+
+// Verify that ByteSink is subclassable and Flush() overridable.
+class FlushingByteSink : public StringByteSink {
+ public:
+  explicit FlushingByteSink(string* dest) : StringByteSink(dest) {}
+  virtual void Flush() { Append("z", 1); }
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FlushingByteSink);
+};
+
+// Write and Flush via the ByteSink superclass interface.
+void WriteAndFlush(ByteSink* s) {
+  s->Append("abc", 3);
+  s->Flush();
+}
+
+TEST(ByteSinkTest, Flush) {
+  string str;
+  FlushingByteSink f_sink(&str);
+  WriteAndFlush(&f_sink);
+  EXPECT_STREQ("abcz", str.c_str());
+}
+
+}  // namespace
+}  // namespace strings
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/callback.h b/src/google/protobuf/stubs/callback.h
new file mode 100644
index 0000000..87271c5
--- /dev/null
+++ b/src/google/protobuf/stubs/callback.h
@@ -0,0 +1,546 @@
+#ifndef GOOGLE_PROTOBUF_STUBS_CALLBACK_H_
+#define GOOGLE_PROTOBUF_STUBS_CALLBACK_H_
+
+#include <google/protobuf/stubs/macros.h>
+#include <google/protobuf/stubs/type_traits.h>
+
+// ===================================================================
+// emulates google3/base/callback.h
+
+namespace google {
+namespace protobuf {
+
+// Abstract interface for a callback.  When calling an RPC, you must provide
+// a Closure to call when the procedure completes.  See the Service interface
+// in service.h.
+//
+// To automatically construct a Closure which calls a particular function or
+// method with a particular set of parameters, use the NewCallback() function.
+// Example:
+//   void FooDone(const FooResponse* response) {
+//     ...
+//   }
+//
+//   void CallFoo() {
+//     ...
+//     // When done, call FooDone() and pass it a pointer to the response.
+//     Closure* callback = NewCallback(&FooDone, response);
+//     // Make the call.
+//     service->Foo(controller, request, response, callback);
+//   }
+//
+// Example that calls a method:
+//   class Handler {
+//    public:
+//     ...
+//
+//     void FooDone(const FooResponse* response) {
+//       ...
+//     }
+//
+//     void CallFoo() {
+//       ...
+//       // When done, call FooDone() and pass it a pointer to the response.
+//       Closure* callback = NewCallback(this, &Handler::FooDone, response);
+//       // Make the call.
+//       service->Foo(controller, request, response, callback);
+//     }
+//   };
+//
+// Currently NewCallback() supports binding zero, one, or two arguments.
+//
+// Callbacks created with NewCallback() automatically delete themselves when
+// executed.  They should be used when a callback is to be called exactly
+// once (usually the case with RPC callbacks).  If a callback may be called
+// a different number of times (including zero), create it with
+// NewPermanentCallback() instead.  You are then responsible for deleting the
+// callback (using the "delete" keyword as normal).
+//
+// Note that NewCallback() is a bit touchy regarding argument types.  Generally,
+// the values you provide for the parameter bindings must exactly match the
+// types accepted by the callback function.  For example:
+//   void Foo(string s);
+//   NewCallback(&Foo, "foo");          // WON'T WORK:  const char* != string
+//   NewCallback(&Foo, string("foo"));  // WORKS
+// Also note that the arguments cannot be references:
+//   void Foo(const string& s);
+//   string my_str;
+//   NewCallback(&Foo, my_str);  // WON'T WORK:  Can't use references.
+// However, correctly-typed pointers will work just fine.
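+//
+// Example of a permanent callback (sketch; LogTick is a placeholder):
+//   void LogTick();
+//   ...
+//   Closure* callback = NewPermanentCallback(&LogTick);
+//   callback->Run();   // a permanent callback may be run any number of times
+//   callback->Run();
+//   delete callback;   // and must be deleted explicitly by its owner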
+class LIBPROTOBUF_EXPORT Closure {
+ public:
+  Closure() {}
+  virtual ~Closure();
+
+  virtual void Run() = 0;
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Closure);
+};
+
+template<typename R>
+class ResultCallback {
+ public:
+  ResultCallback() {}
+  virtual ~ResultCallback() {}
+
+  virtual R Run() = 0;
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ResultCallback);
+};
+
+template<typename R, typename A1>
+class LIBPROTOBUF_EXPORT ResultCallback1 {
+ public:
+  ResultCallback1() {}
+  virtual ~ResultCallback1() {}
+
+  virtual R Run(A1) = 0;
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ResultCallback1);
+};
+
+template<typename R, typename A1, typename A2>
+class LIBPROTOBUF_EXPORT ResultCallback2 {
+ public:
+  ResultCallback2() {}
+  virtual ~ResultCallback2() {}
+
+  virtual R Run(A1,A2) = 0;
+
+ private:
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ResultCallback2);
+};
+
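+// Example of a result callback (sketch; GetCount is a placeholder):
+//
+//   int GetCount();
+//   ...
+//   ResultCallback<int>* callback = NewCallback(&GetCount);
+//   int count = callback->Run();  // self-deleting: don't use callback again
+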
+namespace internal {
+
+class LIBPROTOBUF_EXPORT FunctionClosure0 : public Closure {
+ public:
+  typedef void (*FunctionType)();
+
+  FunctionClosure0(FunctionType function, bool self_deleting)
+    : function_(function), self_deleting_(self_deleting) {}
+  ~FunctionClosure0();
+
+  void Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    function_();
+    if (needs_delete) delete this;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+};
+
+template <typename Class>
+class MethodClosure0 : public Closure {
+ public:
+  typedef void (Class::*MethodType)();
+
+  MethodClosure0(Class* object, MethodType method, bool self_deleting)
+    : object_(object), method_(method), self_deleting_(self_deleting) {}
+  ~MethodClosure0() {}
+
+  void Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    (object_->*method_)();
+    if (needs_delete) delete this;
+  }
+
+ private:
+  Class* object_;
+  MethodType method_;
+  bool self_deleting_;
+};
+
+template <typename Arg1>
+class FunctionClosure1 : public Closure {
+ public:
+  typedef void (*FunctionType)(Arg1 arg1);
+
+  FunctionClosure1(FunctionType function, bool self_deleting,
+                   Arg1 arg1)
+    : function_(function), self_deleting_(self_deleting),
+      arg1_(arg1) {}
+  ~FunctionClosure1() {}
+
+  void Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    function_(arg1_);
+    if (needs_delete) delete this;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+  Arg1 arg1_;
+};
+
+template <typename Class, typename Arg1>
+class MethodClosure1 : public Closure {
+ public:
+  typedef void (Class::*MethodType)(Arg1 arg1);
+
+  MethodClosure1(Class* object, MethodType method, bool self_deleting,
+                 Arg1 arg1)
+    : object_(object), method_(method), self_deleting_(self_deleting),
+      arg1_(arg1) {}
+  ~MethodClosure1() {}
+
+  void Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    (object_->*method_)(arg1_);
+    if (needs_delete) delete this;
+  }
+
+ private:
+  Class* object_;
+  MethodType method_;
+  bool self_deleting_;
+  Arg1 arg1_;
+};
+
+template <typename Arg1, typename Arg2>
+class FunctionClosure2 : public Closure {
+ public:
+  typedef void (*FunctionType)(Arg1 arg1, Arg2 arg2);
+
+  FunctionClosure2(FunctionType function, bool self_deleting,
+                   Arg1 arg1, Arg2 arg2)
+    : function_(function), self_deleting_(self_deleting),
+      arg1_(arg1), arg2_(arg2) {}
+  ~FunctionClosure2() {}
+
+  void Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    function_(arg1_, arg2_);
+    if (needs_delete) delete this;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+  Arg1 arg1_;
+  Arg2 arg2_;
+};
+
+template <typename Class, typename Arg1, typename Arg2>
+class MethodClosure2 : public Closure {
+ public:
+  typedef void (Class::*MethodType)(Arg1 arg1, Arg2 arg2);
+
+  MethodClosure2(Class* object, MethodType method, bool self_deleting,
+                 Arg1 arg1, Arg2 arg2)
+    : object_(object), method_(method), self_deleting_(self_deleting),
+      arg1_(arg1), arg2_(arg2) {}
+  ~MethodClosure2() {}
+
+  void Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    (object_->*method_)(arg1_, arg2_);
+    if (needs_delete) delete this;
+  }
+
+ private:
+  Class* object_;
+  MethodType method_;
+  bool self_deleting_;
+  Arg1 arg1_;
+  Arg2 arg2_;
+};
+
+template<typename R>
+class FunctionResultCallback_0_0 : public ResultCallback<R> {
+ public:
+  typedef R (*FunctionType)();
+
+  FunctionResultCallback_0_0(FunctionType function, bool self_deleting)
+      : function_(function), self_deleting_(self_deleting) {}
+  ~FunctionResultCallback_0_0() {}
+
+  R Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    R result = function_();
+    if (needs_delete) delete this;
+    return result;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+};
+
+template<typename R, typename P1>
+class FunctionResultCallback_1_0 : public ResultCallback<R> {
+ public:
+  typedef R (*FunctionType)(P1);
+
+  FunctionResultCallback_1_0(FunctionType function, bool self_deleting,
+                             P1 p1)
+      : function_(function), self_deleting_(self_deleting), p1_(p1) {}
+  ~FunctionResultCallback_1_0() {}
+
+  R Run() {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    R result = function_(p1_);
+    if (needs_delete) delete this;
+    return result;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+  P1 p1_;
+};
+
+template<typename R, typename Arg1>
+class FunctionResultCallback_0_1 : public ResultCallback1<R, Arg1> {
+ public:
+  typedef R (*FunctionType)(Arg1 arg1);
+
+  FunctionResultCallback_0_1(FunctionType function, bool self_deleting)
+      : function_(function), self_deleting_(self_deleting) {}
+  ~FunctionResultCallback_0_1() {}
+
+  R Run(Arg1 a1) {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    R result = function_(a1);
+    if (needs_delete) delete this;
+    return result;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+};
+
+template<typename R, typename P1, typename A1>
+class FunctionResultCallback_1_1 : public ResultCallback1<R, A1> {
+ public:
+  typedef R (*FunctionType)(P1, A1);
+
+  FunctionResultCallback_1_1(FunctionType function, bool self_deleting,
+                             P1 p1)
+      : function_(function), self_deleting_(self_deleting), p1_(p1) {}
+  ~FunctionResultCallback_1_1() {}
+
+  R Run(A1 a1) {
+    bool needs_delete = self_deleting_;  // read in case callback deletes
+    R result = function_(p1_, a1);
+    if (needs_delete) delete this;
+    return result;
+  }
+
+ private:
+  FunctionType function_;
+  bool self_deleting_;
+  P1 p1_;
+};
+
+template <typename T>
+struct InternalConstRef {
+  typedef typename remove_reference<T>::type base_type;
+  typedef const base_type& type;
+};
+
+template <typename R, typename T, typename P1, typename P2, typename P3,
+          typename P4, typename P5, typename A1, typename A2>
+class MethodResultCallback_5_2 : public ResultCallback2<R, A1, A2> {
+ public:
+  typedef R (T::*MethodType)(P1, P2, P3, P4, P5, A1, A2);
+  MethodResultCallback_5_2(T* object, MethodType method, bool self_deleting,
+                           P1 p1, P2 p2, P3 p3, P4 p4, P5 p5)
+      : object_(object),
+        method_(method),
+        self_deleting_(self_deleting),
+        p1_(p1),
+        p2_(p2),
+        p3_(p3),
+        p4_(p4),
+        p5_(p5) {}
+  ~MethodResultCallback_5_2() {}
+
+  R Run(A1 a1, A2 a2) {
+    bool needs_delete = self_deleting_;
+    R result = (object_->*method_)(p1_, p2_, p3_, p4_, p5_, a1, a2);
+    if (needs_delete) delete this;
+    return result;
+  }
+
+ private:
+  T* object_;
+  MethodType method_;
+  bool self_deleting_;
+  typename remove_reference<P1>::type p1_;
+  typename remove_reference<P2>::type p2_;
+  typename remove_reference<P3>::type p3_;
+  typename remove_reference<P4>::type p4_;
+  typename remove_reference<P5>::type p5_;
+};
+
+// See Closure.
+inline Closure* NewCallback(void (*function)()) {
+  return new internal::FunctionClosure0(function, true);
+}
+
+// See Closure.
+inline Closure* NewPermanentCallback(void (*function)()) {
+  return new internal::FunctionClosure0(function, false);
+}
+
+// See Closure.
+template <typename Class>
+inline Closure* NewCallback(Class* object, void (Class::*method)()) {
+  return new internal::MethodClosure0<Class>(object, method, true);
+}
+
+// See Closure.
+template <typename Class>
+inline Closure* NewPermanentCallback(Class* object, void (Class::*method)()) {
+  return new internal::MethodClosure0<Class>(object, method, false);
+}
+
+// See Closure.
+template <typename Arg1>
+inline Closure* NewCallback(void (*function)(Arg1),
+                            Arg1 arg1) {
+  return new internal::FunctionClosure1<Arg1>(function, true, arg1);
+}
+
+// See Closure.
+template <typename Arg1>
+inline Closure* NewPermanentCallback(void (*function)(Arg1),
+                                     Arg1 arg1) {
+  return new internal::FunctionClosure1<Arg1>(function, false, arg1);
+}
+
+// See Closure.
+template <typename Class, typename Arg1>
+inline Closure* NewCallback(Class* object, void (Class::*method)(Arg1),
+                            Arg1 arg1) {
+  return new internal::MethodClosure1<Class, Arg1>(object, method, true, arg1);
+}
+
+// See Closure.
+template <typename Class, typename Arg1>
+inline Closure* NewPermanentCallback(Class* object, void (Class::*method)(Arg1),
+                                     Arg1 arg1) {
+  return new internal::MethodClosure1<Class, Arg1>(object, method, false, arg1);
+}
+
+// See Closure.
+template <typename Arg1, typename Arg2>
+inline Closure* NewCallback(void (*function)(Arg1, Arg2),
+                            Arg1 arg1, Arg2 arg2) {
+  return new internal::FunctionClosure2<Arg1, Arg2>(
+    function, true, arg1, arg2);
+}
+
+// See Closure.
+template <typename Arg1, typename Arg2>
+inline Closure* NewPermanentCallback(void (*function)(Arg1, Arg2),
+                                     Arg1 arg1, Arg2 arg2) {
+  return new internal::FunctionClosure2<Arg1, Arg2>(
+    function, false, arg1, arg2);
+}
+
+// See Closure.
+template <typename Class, typename Arg1, typename Arg2>
+inline Closure* NewCallback(Class* object, void (Class::*method)(Arg1, Arg2),
+                            Arg1 arg1, Arg2 arg2) {
+  return new internal::MethodClosure2<Class, Arg1, Arg2>(
+    object, method, true, arg1, arg2);
+}
+
+// See Closure.
+template <typename Class, typename Arg1, typename Arg2>
+inline Closure* NewPermanentCallback(
+    Class* object, void (Class::*method)(Arg1, Arg2),
+    Arg1 arg1, Arg2 arg2) {
+  return new internal::MethodClosure2<Class, Arg1, Arg2>(
+    object, method, false, arg1, arg2);
+}
+
+// See ResultCallback
+template<typename R>
+inline ResultCallback<R>* NewCallback(R (*function)()) {
+  return new internal::FunctionResultCallback_0_0<R>(function, true);
+}
+
+// See ResultCallback
+template<typename R>
+inline ResultCallback<R>* NewPermanentCallback(R (*function)()) {
+  return new internal::FunctionResultCallback_0_0<R>(function, false);
+}
+
+// See ResultCallback
+template<typename R, typename P1>
+inline ResultCallback<R>* NewCallback(R (*function)(P1), P1 p1) {
+  return new internal::FunctionResultCallback_1_0<R, P1>(
+      function, true, p1);
+}
+
+// See ResultCallback
+template<typename R, typename P1>
+inline ResultCallback<R>* NewPermanentCallback(
+    R (*function)(P1), P1 p1) {
+  return new internal::FunctionResultCallback_1_0<R, P1>(
+      function, false, p1);
+}
+
+// See ResultCallback1
+template<typename R, typename A1>
+inline ResultCallback1<R, A1>* NewCallback(R (*function)(A1)) {
+  return new internal::FunctionResultCallback_0_1<R, A1>(function, true);
+}
+
+// See ResultCallback1
+template<typename R, typename A1>
+inline ResultCallback1<R, A1>* NewPermanentCallback(R (*function)(A1)) {
+  return new internal::FunctionResultCallback_0_1<R, A1>(function, false);
+}
+
+// See ResultCallback1
+template<typename R, typename P1, typename A1>
+inline ResultCallback1<R, A1>* NewCallback(R (*function)(P1, A1), P1 p1) {
+  return new internal::FunctionResultCallback_1_1<R, P1, A1>(
+      function, true, p1);
+}
+
+// See ResultCallback1
+template<typename R, typename P1, typename A1>
+inline ResultCallback1<R, A1>* NewPermanentCallback(
+    R (*function)(P1, A1), P1 p1) {
+  return new internal::FunctionResultCallback_1_1<R, P1, A1>(
+      function, false, p1);
+}
+
+// See MethodResultCallback_5_2
+template <typename R, typename T, typename P1, typename P2, typename P3,
+          typename P4, typename P5, typename A1, typename A2>
+inline ResultCallback2<R, A1, A2>* NewPermanentCallback(
+    T* object, R (T::*function)(P1, P2, P3, P4, P5, A1, A2),
+    typename internal::InternalConstRef<P1>::type p1,
+    typename internal::InternalConstRef<P2>::type p2,
+    typename internal::InternalConstRef<P3>::type p3,
+    typename internal::InternalConstRef<P4>::type p4,
+    typename internal::InternalConstRef<P5>::type p5) {
+  return new internal::MethodResultCallback_5_2<R, T, P1, P2, P3, P4, P5, A1,
+                                                A2>(object, function, false, p1,
+                                                    p2, p3, p4, p5);
+}
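+
+// Illustrative usage sketch -- Notify() and Sum() below are hypothetical
+// caller functions, not library API.  Note that these factories live in
+// namespace internal; callers pull them in explicitly, as the unit tests do
+// with "using internal::NewCallback":
+//
+//   void Notify(int id);
+//   Closure* done = NewCallback(&Notify, 42);
+//   done->Run();               // calls Notify(42), then deletes the closure
+//
+//   int Sum(int a, int b);
+//   ResultCallback1<int, int>* add5 = NewPermanentCallback(&Sum, 5);
+//   int eight = add5->Run(3);  // reusable until explicitly deleted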
+
+}  // namespace internal
+
+// A function which does nothing.  Useful for creating no-op callbacks, e.g.:
+//   Closure* nothing = NewCallback(&DoNothing);
+void LIBPROTOBUF_EXPORT DoNothing();
+
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_CALLBACK_H_
diff --git a/src/google/protobuf/stubs/casts.h b/src/google/protobuf/stubs/casts.h
new file mode 100644
index 0000000..be65284
--- /dev/null
+++ b/src/google/protobuf/stubs/casts.h
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_CASTS_H__
+#define GOOGLE_PROTOBUF_CASTS_H__
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/type_traits.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
+// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
+// a const pointer to Foo).
+// When you use implicit_cast, the compiler checks that the cast is safe.
+// Such explicit implicit_casts are necessary in surprisingly many
+// situations where C++ demands an exact type match instead of an
+// argument type convertible to a target type.
+//
+// The From type can be inferred, so the preferred syntax for using
+// implicit_cast is the same as for static_cast etc.:
+//
+//   implicit_cast<ToType>(expr)
+//
+// implicit_cast would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
+template<typename To, typename From>
+inline To implicit_cast(From const &f) {
+  return f;
+}
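+
+// Illustrative usage sketch (Base/Derived and the variables below are
+// hypothetical): implicit_cast makes an otherwise-implicit conversion
+// explicit while still letting the compiler verify it.
+//
+//   double ratio = implicit_cast<double>(numerator) / denominator;
+//   Base* base = implicit_cast<Base*>(derived_ptr);  // upcasts only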
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts
+// always succeed.  When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo?  It
+// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
+// when you downcast, you should use this function.  In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not).  In normal mode, we do the efficient static_cast<>
+// instead.  Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+//    This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI, e.g. code like this:
+//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+
+template<typename To, typename From>     // use like this: down_cast<T*>(foo);
+inline To down_cast(From* f) {                   // so we only accept pointers
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+  if (false) {
+    implicit_cast<From*, To>(0);
+  }
+
+#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)
+  assert(f == NULL || dynamic_cast<To>(f) != NULL);  // RTTI: debug mode only!
+#endif
+  return static_cast<To>(f);
+}
+
+template<typename To, typename From>    // use like this: down_cast<T&>(foo);
+inline To down_cast(From& f) {
+  typedef typename remove_reference<To>::type* ToAsPointer;
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+  if (false) {
+    implicit_cast<From*, ToAsPointer>(0);
+  }
+
+#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)
+  // RTTI: debug mode only!
+  assert(dynamic_cast<ToAsPointer>(&f) != NULL);
+#endif
+  return *static_cast<ToAsPointer>(&f);
+}
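+
+// Illustrative usage sketch (Base/Derived are hypothetical types where
+// Derived inherits from Base):
+//
+//   Base* b = ...;                         // actually points to a Derived
+//   Derived* d = down_cast<Derived*>(b);   // checked by RTTI in debug mode
+//   Derived& r = down_cast<Derived&>(*b);  // reference form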
+
+template<typename To, typename From>
+inline To bit_cast(const From& from) {
+  GOOGLE_COMPILE_ASSERT(sizeof(From) == sizeof(To),
+                        bit_cast_with_different_sizes);
+  To dest;
+  memcpy(&dest, &from, sizeof(dest));
+  return dest;
+}
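+
+// Illustrative usage sketch: reinterpret the bits of a value without
+// running afoul of strict aliasing, e.g. to inspect a float's encoding.
+//
+//   float f = 1.0f;
+//   uint32 bits = bit_cast<uint32>(f);  // 0x3f800000 on IEEE-754 platforms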
+
+}  // namespace internal
+
+// We made these internal so that they would show up as such in the docs,
+// but we don't want to stick "internal::" in front of them everywhere.
+using internal::implicit_cast;
+using internal::down_cast;
+using internal::bit_cast;
+
+}  // namespace protobuf
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_CASTS_H__
diff --git a/src/google/protobuf/stubs/common.cc b/src/google/protobuf/stubs/common.cc
new file mode 100644
index 0000000..54dbafa
--- /dev/null
+++ b/src/google/protobuf/stubs/common.cc
@@ -0,0 +1,459 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/stubs/status.h>
+#include <google/protobuf/stubs/stringpiece.h>
+#include <google/protobuf/stubs/strutil.h>
+#include <google/protobuf/stubs/int128.h>
+#include <errno.h>
+#include <sstream>
+#include <stdio.h>
+#include <vector>
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN  // We only need minimal includes
+#include <windows.h>
+#define snprintf _snprintf    // see comment in strutil.cc
+#elif defined(HAVE_PTHREAD)
+#include <pthread.h>
+#else
+#error "No suitable threading library available."
+#endif
+#if defined(__ANDROID__)
+#include <android/log.h>
+#endif
+
+namespace google {
+namespace protobuf {
+
+namespace internal {
+
+void VerifyVersion(int headerVersion,
+                   int minLibraryVersion,
+                   const char* filename) {
+  if (GOOGLE_PROTOBUF_VERSION < minLibraryVersion) {
+    // Library is too old for headers.
+    GOOGLE_LOG(FATAL)
+      << "This program requires version " << VersionString(minLibraryVersion)
+      << " of the Protocol Buffer runtime library, but the installed version "
+         "is " << VersionString(GOOGLE_PROTOBUF_VERSION) << ".  Please update "
+         "your library.  If you compiled the program yourself, make sure that "
+         "your headers are from the same version of Protocol Buffers as your "
+         "link-time library.  (Version verification failed in \""
+      << filename << "\".)";
+  }
+  if (headerVersion < kMinHeaderVersionForLibrary) {
+    // Headers are too old for library.
+    GOOGLE_LOG(FATAL)
+      << "This program was compiled against version "
+      << VersionString(headerVersion) << " of the Protocol Buffer runtime "
+         "library, which is not compatible with the installed version ("
+      << VersionString(GOOGLE_PROTOBUF_VERSION) <<  ").  Contact the program "
+         "author for an update.  If you compiled the program yourself, make "
+         "sure that your headers are from the same version of Protocol Buffers "
+         "as your link-time library.  (Version verification failed in \""
+      << filename << "\".)";
+  }
+}
+
+string VersionString(int version) {
+  int major = version / 1000000;
+  int minor = (version / 1000) % 1000;
+  int micro = version % 1000;
+
+  // 128 bytes should always be enough, but we use snprintf() anyway to be
+  // safe.
+  char buffer[128];
+  snprintf(buffer, sizeof(buffer), "%d.%d.%d", major, minor, micro);
+
+  // Guard against broken MSVC snprintf().
+  buffer[sizeof(buffer)-1] = '\0';
+
+  return buffer;
+}
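+
+// For example, VersionString(3000000) returns "3.0.0" and
+// VersionString(2006001) returns "2.6.1", per the
+// major * 10^6 + minor * 10^3 + micro encoding documented in common.h.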
+
+}  // namespace internal
+
+// ===================================================================
+// emulates google3/base/logging.cc
+
+namespace internal {
+#if defined(__ANDROID__)
+inline void DefaultLogHandler(LogLevel level, const char* filename, int line,
+                              const string& message) {
+#ifdef GOOGLE_PROTOBUF_MIN_LOG_LEVEL
+  if (level < GOOGLE_PROTOBUF_MIN_LOG_LEVEL) {
+    return;
+  }
+#endif
+  static const char* level_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};
+
+  static const int android_log_levels[] = {
+      ANDROID_LOG_INFO,   // LOG(INFO),
+      ANDROID_LOG_WARN,   // LOG(WARNING)
+      ANDROID_LOG_ERROR,  // LOG(ERROR)
+      ANDROID_LOG_FATAL,  // LOG(FATAL)
+  };
+
+  // Bound the logging level.
+  const int android_log_level = android_log_levels[level];
+  ::std::ostringstream ostr;
+  ostr << "[libprotobuf " << level_names[level] << " " << filename << ":"
+       << line << "] " << message.c_str();
+
+  // Output the log string to the Android log at the appropriate level.
+  __android_log_write(android_log_level, "libprotobuf-native",
+                      ostr.str().c_str());
+  // Also output to stderr.
+  fprintf(stderr, "%s", ostr.str().c_str());
+  fflush(stderr);
+
+  // Indicate termination if needed.
+  if (android_log_level == ANDROID_LOG_FATAL) {
+    __android_log_write(ANDROID_LOG_FATAL, "libprotobuf-native",
+                        "terminating.\n");
+  }
+}
+#else
+void DefaultLogHandler(LogLevel level, const char* filename, int line,
+                       const string& message) {
+  static const char* level_names[] = { "INFO", "WARNING", "ERROR", "FATAL" };
+
+  // We use fprintf() instead of cerr because we want this to work at static
+  // initialization time.
+  fprintf(stderr, "[libprotobuf %s %s:%d] %s\n",
+          level_names[level], filename, line, message.c_str());
+  fflush(stderr);  // Needed on MSVC.
+}
+#endif
+
+void NullLogHandler(LogLevel /* level */, const char* /* filename */,
+                    int /* line */, const string& /* message */) {
+  // Nothing.
+}
+
+static LogHandler* log_handler_ = &DefaultLogHandler;
+static int log_silencer_count_ = 0;
+
+static Mutex* log_silencer_count_mutex_ = NULL;
+GOOGLE_PROTOBUF_DECLARE_ONCE(log_silencer_count_init_);
+
+void DeleteLogSilencerCount() {
+  delete log_silencer_count_mutex_;
+  log_silencer_count_mutex_ = NULL;
+}
+void InitLogSilencerCount() {
+  log_silencer_count_mutex_ = new Mutex;
+  OnShutdown(&DeleteLogSilencerCount);
+}
+void InitLogSilencerCountOnce() {
+  GoogleOnceInit(&log_silencer_count_init_, &InitLogSilencerCount);
+}
+
+LogMessage& LogMessage::operator<<(const string& value) {
+  message_ += value;
+  return *this;
+}
+
+LogMessage& LogMessage::operator<<(const char* value) {
+  message_ += value;
+  return *this;
+}
+
+LogMessage& LogMessage::operator<<(const StringPiece& value) {
+  message_ += value.ToString();
+  return *this;
+}
+
+LogMessage& LogMessage::operator<<(
+    const ::google::protobuf::util::Status& status) {
+  message_ += status.ToString();
+  return *this;
+}
+
+LogMessage& LogMessage::operator<<(const uint128& value) {
+  std::ostringstream str;
+  str << value;
+  message_ += str.str();
+  return *this;
+}
+
+// Since this is just for logging, we don't care if the current locale changes
+// the results -- in fact, we probably prefer that.  So we use snprintf()
+// instead of Simple*toa().
+#undef DECLARE_STREAM_OPERATOR
+#define DECLARE_STREAM_OPERATOR(TYPE, FORMAT)                       \
+  LogMessage& LogMessage::operator<<(TYPE value) {                  \
+    /* 128 bytes should be big enough for any of the primitive */   \
+    /* values which we print with this, but we'll use snprintf() */ \
+    /* anyway to be extra safe. */                                  \
+    char buffer[128];                                               \
+    snprintf(buffer, sizeof(buffer), FORMAT, value);                \
+    /* Guard against broken MSVC snprintf(). */                     \
+    buffer[sizeof(buffer)-1] = '\0';                                \
+    message_ += buffer;                                             \
+    return *this;                                                   \
+  }
+
+DECLARE_STREAM_OPERATOR(char         , "%c" )
+DECLARE_STREAM_OPERATOR(int          , "%d" )
+DECLARE_STREAM_OPERATOR(unsigned int , "%u" )
+DECLARE_STREAM_OPERATOR(long         , "%ld")
+DECLARE_STREAM_OPERATOR(unsigned long, "%lu")
+DECLARE_STREAM_OPERATOR(double       , "%g" )
+DECLARE_STREAM_OPERATOR(void*        , "%p" )
+DECLARE_STREAM_OPERATOR(long long         , "%" GOOGLE_LL_FORMAT "d")
+DECLARE_STREAM_OPERATOR(unsigned long long, "%" GOOGLE_LL_FORMAT "u")
+#undef DECLARE_STREAM_OPERATOR
+
+LogMessage::LogMessage(LogLevel level, const char* filename, int line)
+  : level_(level), filename_(filename), line_(line) {}
+LogMessage::~LogMessage() {}
+
+void LogMessage::Finish() {
+  bool suppress = false;
+
+  if (level_ != LOGLEVEL_FATAL) {
+    InitLogSilencerCountOnce();
+    MutexLock lock(log_silencer_count_mutex_);
+    suppress = log_silencer_count_ > 0;
+  }
+
+  if (!suppress) {
+    log_handler_(level_, filename_, line_, message_);
+  }
+
+  if (level_ == LOGLEVEL_FATAL) {
+#if PROTOBUF_USE_EXCEPTIONS
+    throw FatalException(filename_, line_, message_);
+#else
+    abort();
+#endif
+  }
+}
+
+void LogFinisher::operator=(LogMessage& other) {
+  other.Finish();
+}
+
+}  // namespace internal
+
+LogHandler* SetLogHandler(LogHandler* new_func) {
+  LogHandler* old = internal::log_handler_;
+  if (old == &internal::NullLogHandler) {
+    old = NULL;
+  }
+  if (new_func == NULL) {
+    internal::log_handler_ = &internal::NullLogHandler;
+  } else {
+    internal::log_handler_ = new_func;
+  }
+  return old;
+}
+
+LogSilencer::LogSilencer() {
+  internal::InitLogSilencerCountOnce();
+  MutexLock lock(internal::log_silencer_count_mutex_);
+  ++internal::log_silencer_count_;
+}
+
+LogSilencer::~LogSilencer() {
+  internal::InitLogSilencerCountOnce();
+  MutexLock lock(internal::log_silencer_count_mutex_);
+  --internal::log_silencer_count_;
+}
+
+// ===================================================================
+// emulates google3/base/callback.cc
+
+Closure::~Closure() {}
+
+namespace internal { FunctionClosure0::~FunctionClosure0() {} }
+
+void DoNothing() {}
+
+// ===================================================================
+// emulates google3/base/mutex.cc
+
+#ifdef _WIN32
+
+struct Mutex::Internal {
+  CRITICAL_SECTION mutex;
+#ifndef NDEBUG
+  // Used only to implement AssertHeld().
+  DWORD thread_id;
+#endif
+};
+
+Mutex::Mutex()
+  : mInternal(new Internal) {
+  InitializeCriticalSection(&mInternal->mutex);
+}
+
+Mutex::~Mutex() {
+  DeleteCriticalSection(&mInternal->mutex);
+  delete mInternal;
+}
+
+void Mutex::Lock() {
+  EnterCriticalSection(&mInternal->mutex);
+#ifndef NDEBUG
+  mInternal->thread_id = GetCurrentThreadId();
+#endif
+}
+
+void Mutex::Unlock() {
+#ifndef NDEBUG
+  mInternal->thread_id = 0;
+#endif
+  LeaveCriticalSection(&mInternal->mutex);
+}
+
+void Mutex::AssertHeld() {
+#ifndef NDEBUG
+  GOOGLE_DCHECK_EQ(mInternal->thread_id, GetCurrentThreadId());
+#endif
+}
+
+#elif defined(HAVE_PTHREAD)
+
+struct Mutex::Internal {
+  pthread_mutex_t mutex;
+};
+
+Mutex::Mutex()
+  : mInternal(new Internal) {
+  pthread_mutex_init(&mInternal->mutex, NULL);
+}
+
+Mutex::~Mutex() {
+  pthread_mutex_destroy(&mInternal->mutex);
+  delete mInternal;
+}
+
+void Mutex::Lock() {
+  int result = pthread_mutex_lock(&mInternal->mutex);
+  if (result != 0) {
+    GOOGLE_LOG(FATAL) << "pthread_mutex_lock: " << strerror(result);
+  }
+}
+
+void Mutex::Unlock() {
+  int result = pthread_mutex_unlock(&mInternal->mutex);
+  if (result != 0) {
+    GOOGLE_LOG(FATAL) << "pthread_mutex_unlock: " << strerror(result);
+  }
+}
+
+void Mutex::AssertHeld() {
+  // pthreads doesn't provide a way to check which thread holds the mutex.
+  // TODO(kenton):  Maybe keep track of locking thread ID like with WIN32?
+}
+
+#endif
+
+// ===================================================================
+// emulates google3/util/endian/endian.h
+//
+// TODO(xiaofeng): PROTOBUF_LITTLE_ENDIAN is unfortunately defined in
+// google/protobuf/io/coded_stream.h and therefore cannot be used here.
+// Maybe move that macro definition here in the future.
+uint32 ghtonl(uint32 x) {
+  union {
+    uint32 result;
+    uint8 result_array[4];
+  };
+  result_array[0] = static_cast<uint8>(x >> 24);
+  result_array[1] = static_cast<uint8>((x >> 16) & 0xFF);
+  result_array[2] = static_cast<uint8>((x >> 8) & 0xFF);
+  result_array[3] = static_cast<uint8>(x & 0xFF);
+  return result;
+}
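+
+// For example, ghtonl(0x01020304) produces a value whose in-memory byte
+// sequence is 01 02 03 04 (network byte order) regardless of host
+// endianness: on a big-endian host it equals the input, on a little-endian
+// host it reads back as 0x04030201.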
+
+// ===================================================================
+// Shutdown support.
+
+namespace internal {
+
+typedef void OnShutdownFunc();
+vector<void (*)()>* shutdown_functions = NULL;
+Mutex* shutdown_functions_mutex = NULL;
+GOOGLE_PROTOBUF_DECLARE_ONCE(shutdown_functions_init);
+
+void InitShutdownFunctions() {
+  shutdown_functions = new vector<void (*)()>;
+  shutdown_functions_mutex = new Mutex;
+}
+
+inline void InitShutdownFunctionsOnce() {
+  GoogleOnceInit(&shutdown_functions_init, &InitShutdownFunctions);
+}
+
+void OnShutdown(void (*func)()) {
+  InitShutdownFunctionsOnce();
+  MutexLock lock(shutdown_functions_mutex);
+  shutdown_functions->push_back(func);
+}
+
+}  // namespace internal
+
+void ShutdownProtobufLibrary() {
+  internal::InitShutdownFunctionsOnce();
+
+  // We don't need to lock shutdown_functions_mutex because it's up to the
+  // caller to make sure that no one is using the library before this is
+  // called.
+
+  // Make it safe to call this multiple times.
+  if (internal::shutdown_functions == NULL) return;
+
+  for (size_t i = 0; i < internal::shutdown_functions->size(); i++) {
+    internal::shutdown_functions->at(i)();
+  }
+  delete internal::shutdown_functions;
+  internal::shutdown_functions = NULL;
+  delete internal::shutdown_functions_mutex;
+  internal::shutdown_functions_mutex = NULL;
+}
+
+#if PROTOBUF_USE_EXCEPTIONS
+FatalException::~FatalException() throw() {}
+
+const char* FatalException::what() const throw() {
+  return message_.c_str();
+}
+#endif
+
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/common.h b/src/google/protobuf/stubs/common.h
new file mode 100644
index 0000000..9c05cac
--- /dev/null
+++ b/src/google/protobuf/stubs/common.h
@@ -0,0 +1,225 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda) and others
+//
+// Contains basic types and utilities used by the rest of the library.
+
+#ifndef GOOGLE_PROTOBUF_COMMON_H__
+#define GOOGLE_PROTOBUF_COMMON_H__
+
+#include <string>
+
+#include <google/protobuf/stubs/port.h>
+#include <google/protobuf/stubs/macros.h>
+#include <google/protobuf/stubs/platform_macros.h>
+
+// TODO(liujisi): Remove the following includes after the include clean-up.
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/scoped_ptr.h>
+#include <google/protobuf/stubs/mutex.h>
+#include <google/protobuf/stubs/callback.h>
+
+#ifndef PROTOBUF_USE_EXCEPTIONS
+#if defined(_MSC_VER) && defined(_CPPUNWIND)
+  #define PROTOBUF_USE_EXCEPTIONS 1
+#elif defined(__EXCEPTIONS)
+  #define PROTOBUF_USE_EXCEPTIONS 1
+#else
+  #define PROTOBUF_USE_EXCEPTIONS 0
+#endif
+#endif
+
+#if PROTOBUF_USE_EXCEPTIONS
+#include <exception>
+#endif
+#if defined(__APPLE__)
+#include <TargetConditionals.h>  // for TARGET_OS_IPHONE
+#endif
+
+#if defined(__ANDROID__) || defined(GOOGLE_PROTOBUF_OS_ANDROID) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(GOOGLE_PROTOBUF_OS_IPHONE)
+#include <pthread.h>
+#endif
+
+#if defined(_WIN32) && defined(GetMessage)
+// Allow GetMessage to be used as a valid method name in protobuf classes.
+// windows.h defines GetMessage() as a macro.  Let's re-define it as an inline
+// function.  The inline function should be equivalent for C++ users.
+inline BOOL GetMessage_Win32(
+    LPMSG lpMsg, HWND hWnd,
+    UINT wMsgFilterMin, UINT wMsgFilterMax) {
+  return GetMessage(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax);
+}
+#undef GetMessage
+inline BOOL GetMessage(
+    LPMSG lpMsg, HWND hWnd,
+    UINT wMsgFilterMin, UINT wMsgFilterMax) {
+  return GetMessage_Win32(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax);
+}
+#endif
+
+namespace std {}
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Some of these constants are macros rather than const ints so that they can
+// be used in #if directives.
+
+// The current version, represented as a single integer to make comparison
+// easier:  major * 10^6 + minor * 10^3 + micro
+#define GOOGLE_PROTOBUF_VERSION 3000000
+
+// The minimum library version which works with the current version of the
+// headers.
+#define GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION 3000000
+
+// The minimum header version which works with the current version of
+// the library.  This constant should only be used by protoc's C++ code
+// generator.
+static const int kMinHeaderVersionForLibrary = 3000000;
+
+// The minimum protoc version which works with the current version of the
+// headers.
+#define GOOGLE_PROTOBUF_MIN_PROTOC_VERSION 3000000
+
+// The minimum header version which works with the current version of
+// protoc.  This constant should only be used in VerifyVersion().
+static const int kMinHeaderVersionForProtoc = 3000000;
+
+// Verifies that the headers and libraries are compatible.  Use the macro
+// below to call this.
+void LIBPROTOBUF_EXPORT VerifyVersion(int headerVersion, int minLibraryVersion,
+                                      const char* filename);
+
+// Converts a numeric version number to a string.
+std::string LIBPROTOBUF_EXPORT VersionString(int version);
+
+}  // namespace internal
+
+// Place this macro in your main() function (or somewhere before you attempt
+// to use the protobuf library) to verify that the version you link against
+// matches the headers you compiled against.  If a version mismatch is
+// detected, the process will abort.
+#define GOOGLE_PROTOBUF_VERIFY_VERSION                                    \
+  ::google::protobuf::internal::VerifyVersion(                            \
+    GOOGLE_PROTOBUF_VERSION, GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION,         \
+    __FILE__)
+
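+// Illustrative usage sketch (the surrounding program is hypothetical):
+//
+//   int main(int argc, char* argv[]) {
+//     GOOGLE_PROTOBUF_VERIFY_VERSION;
+//     // ... use generated message classes ...
+//   }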
+
+// ===================================================================
+// from google3/util/utf8/public/unilib.h
+
+class StringPiece;
+namespace internal {
+
+// Checks if the buffer contains structurally-valid UTF-8.  Implemented in
+// structurally_valid.cc.
+LIBPROTOBUF_EXPORT bool IsStructurallyValidUTF8(const char* buf, int len);
+
+inline bool IsStructurallyValidUTF8(const std::string& str) {
+  return IsStructurallyValidUTF8(str.data(), static_cast<int>(str.length()));
+}
+
+// Returns the initial number of bytes of structurally valid UTF-8.
+LIBPROTOBUF_EXPORT int UTF8SpnStructurallyValid(const StringPiece& str);
+
+// Coerces the UTF-8 byte string in str to be a structurally-valid,
+// equal-length string by selectively overwriting illegal bytes with
+// replace_char (typically ' ' or '?').  replace_char must be a legal
+// printable 7-bit ASCII character in the range 0x20..0x7e.  str is
+// read-only.
+//
+// Returns a pointer to the output buffer: str.data() if no changes were
+// made, or dst if some bytes were changed.  dst is allocated by the caller
+// and must be at least as big as str.
+//
+// Optimized for the common case where everything is already structurally
+// valid and no byte copying is done.
+//
+LIBPROTOBUF_EXPORT char* UTF8CoerceToStructurallyValid(
+    const StringPiece& str, char* dst, char replace_char);
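+
+// Illustrative usage sketch (ReadBytes() and the variables below are
+// hypothetical caller code):
+//
+//   string data = ReadBytes();                 // possibly invalid UTF-8
+//   scoped_array<char> buf(new char[data.size()]);
+//   const char* valid = UTF8CoerceToStructurallyValid(
+//       StringPiece(data), buf.get(), '?');
+//   // valid == data.data() if nothing needed fixing, buf.get() otherwise.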
+
+}  // namespace internal
+
+
+// ===================================================================
+// Shutdown support.
+
+// Shut down the entire protocol buffers library, deleting all static-duration
+// objects allocated by the library or by generated .pb.cc files.
+//
+// There are two reasons you might want to call this:
+// * You use a draconian definition of "memory leak" in which you expect
+//   every single malloc() to have a corresponding free(), even for objects
+//   which live until program exit.
+// * You are writing a dynamically-loaded library which needs to clean up
+//   after itself when the library is unloaded.
+//
+// It is safe to call this multiple times.  However, it is not safe to use
+// any other part of the protocol buffers library after
+// ShutdownProtobufLibrary() has been called.
+LIBPROTOBUF_EXPORT void ShutdownProtobufLibrary();
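+
+// Illustrative usage sketch: typically called once at the very end of the
+// program, after all protobuf objects have been destroyed.
+//
+//   int main() {
+//     GOOGLE_PROTOBUF_VERIFY_VERSION;
+//     // ... use protocol buffers ...
+//     google::protobuf::ShutdownProtobufLibrary();
+//     return 0;
+//   }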
+
+namespace internal {
+
+// Register a function to be called when ShutdownProtobufLibrary() is called.
+LIBPROTOBUF_EXPORT void OnShutdown(void (*func)());
+
+}  // namespace internal
+
+#if PROTOBUF_USE_EXCEPTIONS
+class FatalException : public std::exception {
+ public:
+  FatalException(const char* filename, int line, const std::string& message)
+      : filename_(filename), line_(line), message_(message) {}
+  virtual ~FatalException() throw();
+
+  virtual const char* what() const throw();
+
+  const char* filename() const { return filename_; }
+  int line() const { return line_; }
+  const std::string& message() const { return message_; }
+
+ private:
+  const char* filename_;
+  const int line_;
+  const std::string message_;
+};
+#endif
+
+// This is at the end of the file instead of the beginning to work around a bug
+// in some versions of MSVC.
+using namespace std;  // Don't do this at home, kids.
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_COMMON_H__
diff --git a/src/google/protobuf/stubs/common_unittest.cc b/src/google/protobuf/stubs/common_unittest.cc
new file mode 100644
index 0000000..25bae9b
--- /dev/null
+++ b/src/google/protobuf/stubs/common_unittest.cc
@@ -0,0 +1,358 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+
+#include <vector>
+#include <google/protobuf/stubs/casts.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/strutil.h>
+#include <google/protobuf/stubs/substitute.h>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+using internal::NewCallback;
+using internal::NewPermanentCallback;
+namespace {
+
+// TODO(kenton):  More tests.
+
+#ifdef PACKAGE_VERSION  // only defined when using automake, not MSVC
+
+TEST(VersionTest, VersionMatchesConfig) {
+  // Verify that the version string specified in config.h matches the one
+  // in common.h.  The config.h version is a string which may have a suffix
+  // like "beta" or "rc1", so we remove that.
+  string version = PACKAGE_VERSION;
+  int pos = 0;
+  while (pos < version.size() &&
+         (ascii_isdigit(version[pos]) || version[pos] == '.')) {
+    ++pos;
+  }
+  version.erase(pos);
+
+  EXPECT_EQ(version, internal::VersionString(GOOGLE_PROTOBUF_VERSION));
+}
+
+#endif  // PACKAGE_VERSION
+
+TEST(CommonTest, IntMinMaxConstants) {
+  // kint32min was declared incorrectly in the first release of protobufs.
+  // Ugh.
+  EXPECT_LT(kint32min, kint32max);
+  EXPECT_EQ(static_cast<uint32>(kint32min), static_cast<uint32>(kint32max) + 1);
+  EXPECT_LT(kint64min, kint64max);
+  EXPECT_EQ(static_cast<uint64>(kint64min), static_cast<uint64>(kint64max) + 1);
+  EXPECT_EQ(0, kuint32max + 1);
+  EXPECT_EQ(0, kuint64max + 1);
+}
+
+vector<string> captured_messages_;
+
+void CaptureLog(LogLevel level, const char* filename, int line,
+                const string& message) {
+  captured_messages_.push_back(
+    strings::Substitute("$0 $1:$2: $3",
+      implicit_cast<int>(level), filename, line, message));
+}
+
+TEST(LoggingTest, DefaultLogging) {
+  CaptureTestStderr();
+  int line = __LINE__;
+  GOOGLE_LOG(INFO   ) << "A message.";
+  GOOGLE_LOG(WARNING) << "A warning.";
+  GOOGLE_LOG(ERROR  ) << "An error.";
+
+  string text = GetCapturedTestStderr();
+  EXPECT_EQ(
+    "[libprotobuf INFO " __FILE__ ":" + SimpleItoa(line + 1) + "] A message.\n"
+    "[libprotobuf WARNING " __FILE__ ":" + SimpleItoa(line + 2) + "] A warning.\n"
+    "[libprotobuf ERROR " __FILE__ ":" + SimpleItoa(line + 3) + "] An error.\n",
+    text);
+}
+
+TEST(LoggingTest, NullLogging) {
+  LogHandler* old_handler = SetLogHandler(NULL);
+
+  CaptureTestStderr();
+  GOOGLE_LOG(INFO   ) << "A message.";
+  GOOGLE_LOG(WARNING) << "A warning.";
+  GOOGLE_LOG(ERROR  ) << "An error.";
+
+  EXPECT_TRUE(SetLogHandler(old_handler) == NULL);
+
+  string text = GetCapturedTestStderr();
+  EXPECT_EQ("", text);
+}
+
+TEST(LoggingTest, CaptureLogging) {
+  captured_messages_.clear();
+
+  LogHandler* old_handler = SetLogHandler(&CaptureLog);
+
+  int start_line = __LINE__;
+  GOOGLE_LOG(ERROR) << "An error.";
+  GOOGLE_LOG(WARNING) << "A warning.";
+
+  EXPECT_TRUE(SetLogHandler(old_handler) == &CaptureLog);
+
+  ASSERT_EQ(2, captured_messages_.size());
+  EXPECT_EQ(
+    "2 " __FILE__ ":" + SimpleItoa(start_line + 1) + ": An error.",
+    captured_messages_[0]);
+  EXPECT_EQ(
+    "1 " __FILE__ ":" + SimpleItoa(start_line + 2) + ": A warning.",
+    captured_messages_[1]);
+}
+
+TEST(LoggingTest, SilenceLogging) {
+  captured_messages_.clear();
+
+  LogHandler* old_handler = SetLogHandler(&CaptureLog);
+
+  int line1 = __LINE__; GOOGLE_LOG(INFO) << "Visible1";
+  LogSilencer* silencer1 = new LogSilencer;
+  GOOGLE_LOG(INFO) << "Not visible.";
+  LogSilencer* silencer2 = new LogSilencer;
+  GOOGLE_LOG(INFO) << "Not visible.";
+  delete silencer1;
+  GOOGLE_LOG(INFO) << "Not visible.";
+  delete silencer2;
+  int line2 = __LINE__; GOOGLE_LOG(INFO) << "Visible2";
+
+  EXPECT_TRUE(SetLogHandler(old_handler) == &CaptureLog);
+
+  ASSERT_EQ(2, captured_messages_.size());
+  EXPECT_EQ(
+    "0 " __FILE__ ":" + SimpleItoa(line1) + ": Visible1",
+    captured_messages_[0]);
+  EXPECT_EQ(
+    "0 " __FILE__ ":" + SimpleItoa(line2) + ": Visible2",
+    captured_messages_[1]);
+}
+
+class ClosureTest : public testing::Test {
+ public:
+  void SetA123Method()   { a_ = 123; }
+  static void SetA123Function() { current_instance_->a_ = 123; }
+
+  void SetAMethod(int a)         { a_ = a; }
+  void SetCMethod(string c)      { c_ = c; }
+
+  static void SetAFunction(int a)         { current_instance_->a_ = a; }
+  static void SetCFunction(string c)      { current_instance_->c_ = c; }
+
+  void SetABMethod(int a, const char* b)  { a_ = a; b_ = b; }
+  static void SetABFunction(int a, const char* b) {
+    current_instance_->a_ = a;
+    current_instance_->b_ = b;
+  }
+
+  virtual void SetUp() {
+    current_instance_ = this;
+    a_ = 0;
+    b_ = NULL;
+    c_.clear();
+    permanent_closure_ = NULL;
+  }
+
+  void DeleteClosureInCallback() {
+    delete permanent_closure_;
+  }
+
+  int a_;
+  const char* b_;
+  string c_;
+  Closure* permanent_closure_;
+
+  static ClosureTest* current_instance_;
+};
+
+ClosureTest* ClosureTest::current_instance_ = NULL;
+
+TEST_F(ClosureTest, TestClosureFunction0) {
+  Closure* closure = NewCallback(&SetA123Function);
+  EXPECT_NE(123, a_);
+  closure->Run();
+  EXPECT_EQ(123, a_);
+}
+
+TEST_F(ClosureTest, TestClosureMethod0) {
+  Closure* closure = NewCallback(current_instance_,
+                                 &ClosureTest::SetA123Method);
+  EXPECT_NE(123, a_);
+  closure->Run();
+  EXPECT_EQ(123, a_);
+}
+
+TEST_F(ClosureTest, TestClosureFunction1) {
+  Closure* closure = NewCallback(&SetAFunction, 456);
+  EXPECT_NE(456, a_);
+  closure->Run();
+  EXPECT_EQ(456, a_);
+}
+
+TEST_F(ClosureTest, TestClosureMethod1) {
+  Closure* closure = NewCallback(current_instance_,
+                                 &ClosureTest::SetAMethod, 456);
+  EXPECT_NE(456, a_);
+  closure->Run();
+  EXPECT_EQ(456, a_);
+}
+
+TEST_F(ClosureTest, TestClosureFunction1String) {
+  Closure* closure = NewCallback(&SetCFunction, string("test"));
+  EXPECT_NE("test", c_);
+  closure->Run();
+  EXPECT_EQ("test", c_);
+}
+
+TEST_F(ClosureTest, TestClosureMethod1String) {
+  Closure* closure = NewCallback(current_instance_,
+                                 &ClosureTest::SetCMethod, string("test"));
+  EXPECT_NE("test", c_);
+  closure->Run();
+  EXPECT_EQ("test", c_);
+}
+
+TEST_F(ClosureTest, TestClosureFunction2) {
+  const char* cstr = "hello";
+  Closure* closure = NewCallback(&SetABFunction, 789, cstr);
+  EXPECT_NE(789, a_);
+  EXPECT_NE(cstr, b_);
+  closure->Run();
+  EXPECT_EQ(789, a_);
+  EXPECT_EQ(cstr, b_);
+}
+
+TEST_F(ClosureTest, TestClosureMethod2) {
+  const char* cstr = "hello";
+  Closure* closure = NewCallback(current_instance_,
+                                 &ClosureTest::SetABMethod, 789, cstr);
+  EXPECT_NE(789, a_);
+  EXPECT_NE(cstr, b_);
+  closure->Run();
+  EXPECT_EQ(789, a_);
+  EXPECT_EQ(cstr, b_);
+}
+
+// Repeat all of the above with NewPermanentCallback()
+
+TEST_F(ClosureTest, TestPermanentClosureFunction0) {
+  Closure* closure = NewPermanentCallback(&SetA123Function);
+  EXPECT_NE(123, a_);
+  closure->Run();
+  EXPECT_EQ(123, a_);
+  a_ = 0;
+  closure->Run();
+  EXPECT_EQ(123, a_);
+  delete closure;
+}
+
+TEST_F(ClosureTest, TestPermanentClosureMethod0) {
+  Closure* closure = NewPermanentCallback(current_instance_,
+                                          &ClosureTest::SetA123Method);
+  EXPECT_NE(123, a_);
+  closure->Run();
+  EXPECT_EQ(123, a_);
+  a_ = 0;
+  closure->Run();
+  EXPECT_EQ(123, a_);
+  delete closure;
+}
+
+TEST_F(ClosureTest, TestPermanentClosureFunction1) {
+  Closure* closure = NewPermanentCallback(&SetAFunction, 456);
+  EXPECT_NE(456, a_);
+  closure->Run();
+  EXPECT_EQ(456, a_);
+  a_ = 0;
+  closure->Run();
+  EXPECT_EQ(456, a_);
+  delete closure;
+}
+
+TEST_F(ClosureTest, TestPermanentClosureMethod1) {
+  Closure* closure = NewPermanentCallback(current_instance_,
+                                          &ClosureTest::SetAMethod, 456);
+  EXPECT_NE(456, a_);
+  closure->Run();
+  EXPECT_EQ(456, a_);
+  a_ = 0;
+  closure->Run();
+  EXPECT_EQ(456, a_);
+  delete closure;
+}
+
+TEST_F(ClosureTest, TestPermanentClosureFunction2) {
+  const char* cstr = "hello";
+  Closure* closure = NewPermanentCallback(&SetABFunction, 789, cstr);
+  EXPECT_NE(789, a_);
+  EXPECT_NE(cstr, b_);
+  closure->Run();
+  EXPECT_EQ(789, a_);
+  EXPECT_EQ(cstr, b_);
+  a_ = 0;
+  b_ = NULL;
+  closure->Run();
+  EXPECT_EQ(789, a_);
+  EXPECT_EQ(cstr, b_);
+  delete closure;
+}
+
+TEST_F(ClosureTest, TestPermanentClosureMethod2) {
+  const char* cstr = "hello";
+  Closure* closure = NewPermanentCallback(current_instance_,
+                                          &ClosureTest::SetABMethod, 789, cstr);
+  EXPECT_NE(789, a_);
+  EXPECT_NE(cstr, b_);
+  closure->Run();
+  EXPECT_EQ(789, a_);
+  EXPECT_EQ(cstr, b_);
+  a_ = 0;
+  b_ = NULL;
+  closure->Run();
+  EXPECT_EQ(789, a_);
+  EXPECT_EQ(cstr, b_);
+  delete closure;
+}
+
+TEST_F(ClosureTest, TestPermanentClosureDeleteInCallback) {
+  permanent_closure_ = NewPermanentCallback((ClosureTest*) this,
+      &ClosureTest::DeleteClosureInCallback);
+  permanent_closure_->Run();
+}
+
+}  // anonymous namespace
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/fastmem.h b/src/google/protobuf/stubs/fastmem.h
new file mode 100644
index 0000000..763a6e6
--- /dev/null
+++ b/src/google/protobuf/stubs/fastmem.h
@@ -0,0 +1,152 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Fast memory copying and comparison routines.
+//   strings::fastmemcmp_inlined() replaces memcmp()
+//   strings::memcpy_inlined() replaces memcpy()
+//   strings::memeq(a, b, n) replaces memcmp(a, b, n) == 0
+//
+// strings::*_inlined() routines are inline versions of the
+// routines exported by this module.  Sometimes using the inlined
+// versions is faster.  Measure before using the inlined versions.
+//
+// Performance measurement:
+//   strings::fastmemcmp_inlined
+//     Analysis: memcmp, fastmemcmp_inlined, fastmemcmp
+//     2012-01-30
+
+#ifndef GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
+#define GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
+
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Return true if the n bytes at a equal the n bytes at b.
+// The regions are allowed to overlap.
+//
+// The performance is similar to that of memcmp(), but faster for
+// moderately-sized inputs, or inputs that share a common prefix and differ
+// somewhere in their last 8 bytes. Further optimizations can be added later
+// if it makes sense to do so.
+inline bool memeq(const char* a, const char* b, size_t n) {
+  size_t n_rounded_down = n & ~static_cast<size_t>(7);
+  if (GOOGLE_PREDICT_FALSE(n_rounded_down == 0)) {  // n <= 7
+    return memcmp(a, b, n) == 0;
+  }
+  // n >= 8
+  uint64 u = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
+  uint64 v = GOOGLE_UNALIGNED_LOAD64(a + n - 8) ^ GOOGLE_UNALIGNED_LOAD64(b + n - 8);
+  if ((u | v) != 0) {  // The first or last 8 bytes differ.
+    return false;
+  }
+  a += 8;
+  b += 8;
+  n = n_rounded_down - 8;
+  if (n > 128) {
+    // As of 2012, memcmp on x86-64 uses a big unrolled loop with SSE2
+    // instructions, and while we could try to do something faster, it
+    // doesn't seem worth pursuing.
+    return memcmp(a, b, n) == 0;
+  }
+  for (; n >= 16; n -= 16) {
+    uint64 x = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
+    uint64 y = GOOGLE_UNALIGNED_LOAD64(a + 8) ^ GOOGLE_UNALIGNED_LOAD64(b + 8);
+    if ((x | y) != 0) {
+      return false;
+    }
+    a += 16;
+    b += 16;
+  }
+  // n must be 0 or 8 now because it was a multiple of 8 at the top of the loop.
+  return n == 0 || GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b);
+}
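+
+// Illustrative usage sketch (key/other are hypothetical strings): memeq is
+// a drop-in replacement for "memcmp(a, b, n) == 0":
+//
+//   if (key.size() == other.size() &&
+//       memeq(key.data(), other.data(), key.size())) {
+//     // the two keys hold identical bytes
+//   }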
+
+inline int fastmemcmp_inlined(const char *a, const char *b, size_t n) {
+  if (n >= 64) {
+    return memcmp(a, b, n);
+  }
+  const char* a_limit = a + n;
+  while (a + sizeof(uint64) <= a_limit &&
+         GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b)) {
+    a += sizeof(uint64);
+    b += sizeof(uint64);
+  }
+  if (a + sizeof(uint32) <= a_limit &&
+      GOOGLE_UNALIGNED_LOAD32(a) == GOOGLE_UNALIGNED_LOAD32(b)) {
+    a += sizeof(uint32);
+    b += sizeof(uint32);
+  }
+  while (a < a_limit) {
+    int d = static_cast<uint32>(*a++) - static_cast<uint32>(*b++);
+    if (d) return d;
+  }
+  return 0;
+}
+
+// The standard memcpy operation is slow for variable small sizes.
+// This implementation inlines the optimal realization for sizes 1 to 16.
+// To avoid code bloat, don't use it in spots that are not
+// performance-critical, or where sizes <= 16 are not expected to be frequent.
+inline void memcpy_inlined(char *dst, const char *src, size_t size) {
+  // Compiler inlines code with minimal amount of data movement when third
+  // parameter of memcpy is a constant.
+  switch (size) {
+    case  1: memcpy(dst, src, 1); break;
+    case  2: memcpy(dst, src, 2); break;
+    case  3: memcpy(dst, src, 3); break;
+    case  4: memcpy(dst, src, 4); break;
+    case  5: memcpy(dst, src, 5); break;
+    case  6: memcpy(dst, src, 6); break;
+    case  7: memcpy(dst, src, 7); break;
+    case  8: memcpy(dst, src, 8); break;
+    case  9: memcpy(dst, src, 9); break;
+    case 10: memcpy(dst, src, 10); break;
+    case 11: memcpy(dst, src, 11); break;
+    case 12: memcpy(dst, src, 12); break;
+    case 13: memcpy(dst, src, 13); break;
+    case 14: memcpy(dst, src, 14); break;
+    case 15: memcpy(dst, src, 15); break;
+    case 16: memcpy(dst, src, 16); break;
+    default: memcpy(dst, src, size); break;
+  }
+}
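+
+// Illustrative usage sketch (dst/field are hypothetical): intended for hot
+// paths that copy small, variable-sized chunks such as short string fields.
+//
+//   memcpy_inlined(dst, field.data(), field.size());  // usually size <= 16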
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
diff --git a/src/google/protobuf/stubs/hash.h b/src/google/protobuf/stubs/hash.h
new file mode 100755
index 0000000..5833432
--- /dev/null
+++ b/src/google/protobuf/stubs/hash.h
@@ -0,0 +1,374 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// Deals with the fact that hash_map is not defined everywhere.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_HASH_H__
+#define GOOGLE_PROTOBUF_STUBS_HASH_H__
+
+#include <string.h>
+#include <google/protobuf/stubs/common.h>
+
+#define GOOGLE_PROTOBUF_HAVE_HASH_MAP 1
+#define GOOGLE_PROTOBUF_HAVE_HASH_SET 1
+
+// Android
+#if defined(__ANDROID__)
+# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP
+# undef GOOGLE_PROTOBUF_HAVE_HASH_SET
+
+// Use C++11 unordered_{map|set} if available.
+#elif ((_LIBCPP_STD_VER >= 11) || \
+      (((__cplusplus >= 201103L) || defined(__GXX_EXPERIMENTAL_CXX0X)) && \
+      (__GLIBCXX__ > 20090421)))
+# define GOOGLE_PROTOBUF_HAS_CXX11_HASH
+
+// For Xcode >= 4.6:  the compiler is clang with libc++.
+// For earlier Xcode versions: the compiler is gcc-4.2.1 with libstdc++.
+// libc++ provides <unordered_map> and friends even in non-C++11 mode,
+// and it does not provide the tr1 library. Therefore the following macro
+// checks against this special case.
+// Note that we should not test the __APPLE_CC__ version number or the
+// __clang__ macro, since the new compiler can still use -stdlib=libstdc++, in
+// which case <unordered_map> is not compilable without -std=c++11.
+#elif defined(__APPLE_CC__)
+# if __GNUC__ >= 4
+#  define GOOGLE_PROTOBUF_HAS_TR1
+# else
+// Not tested for gcc < 4... These settings can compile under 4.2.1 though.
+#  define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx
+#  include <ext/hash_map>
+#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
+#  include <ext/hash_set>
+#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
+# endif
+
+// Version checks for gcc.
+#elif defined(__GNUC__)
+// For GCC 4.x+, use tr1::unordered_map/set; otherwise, follow the
+// instructions from:
+// https://gcc.gnu.org/onlinedocs/libstdc++/manual/backwards.html
+# if __GNUC__ >= 4
+#  define GOOGLE_PROTOBUF_HAS_TR1
+# elif __GNUC__ >= 3
+#  include <backward/hash_map>
+#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
+#  include <backward/hash_set>
+#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
+#  if __GNUC__ == 3 && __GNUC_MINOR__ == 0
+#   define GOOGLE_PROTOBUF_HASH_NAMESPACE std       // GCC 3.0
+#  else
+#   define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx // GCC 3.1 and later
+#  endif
+# else
+#  define GOOGLE_PROTOBUF_HASH_NAMESPACE
+#  include <hash_map>
+#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
+#  include <hash_set>
+#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
+# endif
+
+// Version checks for MSC.
+// Apparently Microsoft decided to move hash_map *back* to the std namespace in
+// MSVC 2010:
+// http://blogs.msdn.com/vcblog/archive/2009/05/25/stl-breaking-changes-in-visual-studio-2010-beta-1.aspx
+// And... it was moved back to stdext in MSVC 2013 (2012 not checked). That
+// said, using unordered_map for MSVC 2010 and beyond is our safest bet.
+#elif defined(_MSC_VER)
+# if _MSC_VER >= 1600  // Since Visual Studio 2010
+#  define GOOGLE_PROTOBUF_HAS_CXX11_HASH
+#  define GOOGLE_PROTOBUF_HASH_COMPARE std::hash_compare
+# elif _MSC_VER >= 1500  // Since Visual Studio 2008
+#  undef GOOGLE_PROTOBUF_HAVE_HASH_MAP
+#  undef GOOGLE_PROTOBUF_HAVE_HASH_SET
+# elif _MSC_VER >= 1310
+#  define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext
+#  include <hash_map>
+#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
+#  include <hash_set>
+#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
+#  define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare
+# else
+#  define GOOGLE_PROTOBUF_HASH_NAMESPACE std
+#  include <hash_map>
+#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
+#  include <hash_set>
+#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
+#  define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare
+# endif
+
+// **ADD NEW COMPILER SUPPORT HERE.**
+// For other compilers, undefine the macros and fall back to std::map, below
+// in google/protobuf/stubs/hash.h.
+#else
+# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP
+# undef GOOGLE_PROTOBUF_HAVE_HASH_SET
+#endif
+
+#if defined(GOOGLE_PROTOBUF_HAS_CXX11_HASH)
+# define GOOGLE_PROTOBUF_HASH_NAMESPACE std
+# include <unordered_map>
+# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map
+# include <unordered_set>
+# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set
+#elif defined(GOOGLE_PROTOBUF_HAS_TR1)
+# define GOOGLE_PROTOBUF_HASH_NAMESPACE std::tr1
+# include <tr1/unordered_map>
+# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map
+# include <tr1/unordered_set>
+# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set
+#endif
+
+# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START \
+  namespace google {                                      \
+  namespace protobuf {
+# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END }}
+
+#undef GOOGLE_PROTOBUF_HAS_CXX11_HASH
+#undef GOOGLE_PROTOBUF_HAS_TR1
+
+#if defined(GOOGLE_PROTOBUF_HAVE_HASH_MAP) && \
+    defined(GOOGLE_PROTOBUF_HAVE_HASH_SET)
+#else
+#define GOOGLE_PROTOBUF_MISSING_HASH
+#include <map>
+#include <set>
+#endif
+
+namespace google {
+namespace protobuf {
+
+#ifdef GOOGLE_PROTOBUF_MISSING_HASH
+#undef GOOGLE_PROTOBUF_MISSING_HASH
+
+// This system doesn't have hash_map or hash_set.  Emulate them using map and
+// set.
+
+// Make hash<T> be the same as less<T>.  Note that everywhere where custom
+// hash functions are defined in the protobuf code, they are also defined such
+// that they can be used as "less" functions, which is required by MSVC anyway.
+template <typename Key>
+struct hash {
+  // Dummy, just to make derivative hash functions compile.
+  int operator()(const Key& key) {
+    GOOGLE_LOG(FATAL) << "Should never be called.";
+    return 0;
+  }
+
+  inline bool operator()(const Key& a, const Key& b) const {
+    return a < b;
+  }
+};
+
+// Make sure char* is compared by value.
+template <>
+struct hash<const char*> {
+  // Dummy, just to make derivative hash functions compile.
+  int operator()(const char* key) {
+    GOOGLE_LOG(FATAL) << "Should never be called.";
+    return 0;
+  }
+
+  inline bool operator()(const char* a, const char* b) const {
+    return strcmp(a, b) < 0;
+  }
+};
+
+template <typename Key, typename Data,
+          typename HashFcn = hash<Key>,
+          typename EqualKey = std::equal_to<Key>,
+          typename Alloc = std::allocator< std::pair<const Key, Data> > >
+class hash_map : public std::map<Key, Data, HashFcn, Alloc> {
+  typedef std::map<Key, Data, HashFcn, Alloc> BaseClass;
+
+ public:
+  hash_map(int a = 0, const HashFcn& b = HashFcn(),
+           const EqualKey& c = EqualKey(),
+           const Alloc& d = Alloc()) : BaseClass(b, d) {}
+};
+
+template <typename Key,
+          typename HashFcn = hash<Key>,
+          typename EqualKey = std::equal_to<Key> >
+class hash_set : public std::set<Key, HashFcn> {
+ public:
+  hash_set(int = 0) {}
+};
+
+#elif defined(_MSC_VER) && !defined(_STLPORT_VERSION)
+
+template <typename Key>
+struct hash : public GOOGLE_PROTOBUF_HASH_COMPARE<Key> {
+};
+
+// MSVC's hash_compare<const char*> hashes based on the string contents but
+// compares based on the string pointer.  WTF?
+class CstringLess {
+ public:
+  inline bool operator()(const char* a, const char* b) const {
+    return strcmp(a, b) < 0;
+  }
+};
+
+template <>
+struct hash<const char*>
+    : public GOOGLE_PROTOBUF_HASH_COMPARE<const char*, CstringLess> {};
+
+template <typename Key, typename Data,
+          typename HashFcn = hash<Key>,
+          typename EqualKey = std::equal_to<Key>,
+          typename Alloc = std::allocator< std::pair<const Key, Data> > >
+class hash_map
+    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
+          Key, Data, HashFcn, EqualKey, Alloc> {
+  typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
+      Key, Data, HashFcn, EqualKey, Alloc> BaseClass;
+
+ public:
+  hash_map(int a = 0, const HashFcn& b = HashFcn(),
+           const EqualKey& c = EqualKey(),
+           const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {}
+};
+
+template <typename Key, typename HashFcn = hash<Key>,
+          typename EqualKey = std::equal_to<Key> >
+class hash_set
+    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<
+          Key, HashFcn, EqualKey> {
+ public:
+  hash_set(int = 0) {}
+};
+
+#else
+
+template <typename Key>
+struct hash : public GOOGLE_PROTOBUF_HASH_NAMESPACE::hash<Key> {
+};
+
+template <typename Key>
+struct hash<const Key*> {
+  inline size_t operator()(const Key* key) const {
+    return reinterpret_cast<size_t>(key);
+  }
+};
+
+// Unlike the old SGI version, the TR1 "hash" does not special-case char*.  So,
+// we go ahead and provide our own implementation.
+template <>
+struct hash<const char*> {
+  inline size_t operator()(const char* str) const {
+    size_t result = 0;
+    for (; *str != '\0'; str++) {
+      result = 5 * result + *str;
+    }
+    return result;
+  }
+};
+
+template<>
+struct hash<bool> {
+  size_t operator()(bool x) const {
+    return static_cast<size_t>(x);
+  }
+};
+
+template <typename Key, typename Data,
+          typename HashFcn = hash<Key>,
+          typename EqualKey = std::equal_to<Key>,
+          typename Alloc = std::allocator< std::pair<const Key, Data> > >
+class hash_map
+    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
+          Key, Data, HashFcn, EqualKey, Alloc> {
+  typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
+      Key, Data, HashFcn, EqualKey, Alloc> BaseClass;
+
+ public:
+  hash_map(int a = 0, const HashFcn& b = HashFcn(),
+           const EqualKey& c = EqualKey(),
+           const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {}
+};
+
+template <typename Key, typename HashFcn = hash<Key>,
+          typename EqualKey = std::equal_to<Key> >
+class hash_set
+    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<
+          Key, HashFcn, EqualKey> {
+ public:
+  hash_set(int = 0) {}
+};
+
+#endif  // !GOOGLE_PROTOBUF_MISSING_HASH
+
+template <>
+struct hash<string> {
+  inline size_t operator()(const string& key) const {
+    return hash<const char*>()(key.c_str());
+  }
+
+  static const size_t bucket_size = 4;
+  static const size_t min_buckets = 8;
+  inline bool operator()(const string& a, const string& b) const {
+    return a < b;
+  }
+};
+
+template <typename First, typename Second>
+struct hash<pair<First, Second> > {
+  inline size_t operator()(const pair<First, Second>& key) const {
+    size_t first_hash = hash<First>()(key.first);
+    size_t second_hash = hash<Second>()(key.second);
+
+    // FIXME(kenton):  What is the best way to compute this hash?  I have
+    // no idea!  This seems a bit better than an XOR.
+    return first_hash * ((1 << 16) - 1) + second_hash;
+  }
+
+  static const size_t bucket_size = 4;
+  static const size_t min_buckets = 8;
+  inline bool operator()(const pair<First, Second>& a,
+                           const pair<First, Second>& b) const {
+    return a < b;
+  }
+};
+
+// Used by GCC/SGI STL only.  (Why isn't this provided by the standard
+// library?  :( )
+struct streq {
+  inline bool operator()(const char* a, const char* b) const {
+    return strcmp(a, b) == 0;
+  }
+};
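+
+// Illustrative usage only (a hedged sketch, not part of the original header):
+// regardless of which branch above was selected, these aliases are meant to
+// be used like the standard containers they wrap, e.g.
+//
+//   hash_map<string, int> counts;
+//   counts["descriptor"] += 1;
+//   hash_set<string> seen;
+//   seen.insert("field_name");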
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_HASH_H__
diff --git a/src/google/protobuf/stubs/int128.cc b/src/google/protobuf/stubs/int128.cc
new file mode 100644
index 0000000..d80c64f
--- /dev/null
+++ b/src/google/protobuf/stubs/int128.cc
@@ -0,0 +1,201 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/int128.h>
+
+#include <iomanip>
+#include <iostream>  // NOLINT(readability/streams)
+#include <sstream>
+
+namespace google {
+namespace protobuf {
+
+const uint128_pod kuint128max = {
+    static_cast<uint64>(GOOGLE_LONGLONG(0xFFFFFFFFFFFFFFFF)),
+    static_cast<uint64>(GOOGLE_LONGLONG(0xFFFFFFFFFFFFFFFF))
+};
+
+// Returns the 0-based position of the last set bit (i.e., most significant bit)
+// in the given uint64. The argument may not be 0.
+//
+// For example:
+//   Given: 5 (decimal) == 101 (binary)
+//   Returns: 2
+#define STEP(T, n, pos, sh)                   \
+  do {                                        \
+    if ((n) >= (static_cast<T>(1) << (sh))) { \
+      (n) = (n) >> (sh);                      \
+      (pos) |= (sh);                          \
+    }                                         \
+  } while (0)
+static inline int Fls64(uint64 n) {
+  GOOGLE_DCHECK_NE(0, n);
+  int pos = 0;
+  STEP(uint64, n, pos, 0x20);
+  uint32 n32 = n;
+  STEP(uint32, n32, pos, 0x10);
+  STEP(uint32, n32, pos, 0x08);
+  STEP(uint32, n32, pos, 0x04);
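+  // At this point n32 is nonzero and less than 16.  The constant below packs
+  // the 0-based MSB position of each index i into nibble i, so shifting right
+  // by (n32 << 2) and masking with 0x3 yields the final two bits.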
+  return pos + ((GOOGLE_ULONGLONG(0x3333333322221100) >> (n32 << 2)) & 0x3);
+}
+#undef STEP
+
+// Like Fls64() above, but returns the 0-based position of the last set bit
+// (i.e., most significant bit) in the given uint128. The argument may not be 0.
+static inline int Fls128(uint128 n) {
+  if (uint64 hi = Uint128High64(n)) {
+    return Fls64(hi) + 64;
+  }
+  return Fls64(Uint128Low64(n));
+}
+
+// Long division/modulo for uint128 implemented using the shift-subtract
+// division algorithm adapted from:
+// http://stackoverflow.com/questions/5386377/division-without-using
+void uint128::DivModImpl(uint128 dividend, uint128 divisor,
+                         uint128* quotient_ret, uint128* remainder_ret) {
+  if (divisor == 0) {
+    GOOGLE_LOG(FATAL) << "Division or mod by zero: dividend.hi=" << dividend.hi_
+                      << ", lo=" << dividend.lo_;
+  }
+
+  if (divisor > dividend) {
+    *quotient_ret = 0;
+    *remainder_ret = dividend;
+    return;
+  }
+
+  if (divisor == dividend) {
+    *quotient_ret = 1;
+    *remainder_ret = 0;
+    return;
+  }
+
+  uint128 denominator = divisor;
+  uint128 position = 1;
+  uint128 quotient = 0;
+
+  // Left aligns the MSB of the denominator and the dividend.
+  int shift = Fls128(dividend) - Fls128(denominator);
+  denominator <<= shift;
+  position <<= shift;
+
+  // Uses shift-subtract algorithm to divide dividend by denominator. The
+  // remainder will be left in dividend.
+  while (position > 0) {
+    if (dividend >= denominator) {
+      dividend -= denominator;
+      quotient |= position;
+    }
+    position >>= 1;
+    denominator >>= 1;
+  }
+
+  *quotient_ret = quotient;
+  *remainder_ret = dividend;
+}
+
+uint128& uint128::operator/=(const uint128& divisor) {
+  uint128 quotient = 0;
+  uint128 remainder = 0;
+  DivModImpl(*this, divisor, &quotient, &remainder);
+  *this = quotient;
+  return *this;
+}
+uint128& uint128::operator%=(const uint128& divisor) {
+  uint128 quotient = 0;
+  uint128 remainder = 0;
+  DivModImpl(*this, divisor, &quotient, &remainder);
+  *this = remainder;
+  return *this;
+}
+
+std::ostream& operator<<(std::ostream& o, const uint128& b) {
+  std::ios_base::fmtflags flags = o.flags();
+
+  // Select a divisor which is the largest power of the base < 2^64.
+  uint128 div;
+  std::streamsize div_base_log;
+  switch (flags & std::ios::basefield) {
+    case std::ios::hex:
+      div = GOOGLE_ULONGLONG(0x1000000000000000);  // 16^15
+      div_base_log = 15;
+      break;
+    case std::ios::oct:
+      div = GOOGLE_ULONGLONG(01000000000000000000000);  // 8^21
+      div_base_log = 21;
+      break;
+    default:  // std::ios::dec
+      div = GOOGLE_ULONGLONG(10000000000000000000);  // 10^19
+      div_base_log = 19;
+      break;
+  }
+
+  // Now piece together the uint128 representation from three chunks of
+  // the original value, each less than "div" and therefore representable
+  // as a uint64.
+  std::ostringstream os;
+  std::ios_base::fmtflags copy_mask =
+      std::ios::basefield | std::ios::showbase | std::ios::uppercase;
+  os.setf(flags & copy_mask, copy_mask);
+  uint128 high = b;
+  uint128 low;
+  uint128::DivModImpl(high, div, &high, &low);
+  uint128 mid;
+  uint128::DivModImpl(high, div, &high, &mid);
+  if (high.lo_ != 0) {
+    os << high.lo_;
+    os << std::noshowbase << std::setfill('0') << std::setw(div_base_log);
+    os << mid.lo_;
+    os << std::setw(div_base_log);
+  } else if (mid.lo_ != 0) {
+    os << mid.lo_;
+    os << std::noshowbase << std::setfill('0') << std::setw(div_base_log);
+  }
+  os << low.lo_;
+  std::string rep = os.str();
+
+  // Add the requisite padding.
+  std::streamsize width = o.width(0);
+  if (width > rep.size()) {
+    if ((flags & std::ios::adjustfield) == std::ios::left) {
+      rep.append(width - rep.size(), o.fill());
+    } else {
+      rep.insert(static_cast<std::string::size_type>(0),
+                 width - rep.size(), o.fill());
+    }
+  }
+
+  // Stream the final representation in a single "<<" call.
+  return o << rep;
+}
+
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/int128.h b/src/google/protobuf/stubs/int128.h
new file mode 100644
index 0000000..1499bb7
--- /dev/null
+++ b/src/google/protobuf/stubs/int128.h
@@ -0,0 +1,383 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef GOOGLE_PROTOBUF_STUBS_INT128_H_
+#define GOOGLE_PROTOBUF_STUBS_INT128_H_
+
+#include <google/protobuf/stubs/common.h>
+
+#include <iosfwd>
+
+namespace google {
+namespace protobuf {
+
+struct uint128_pod;
+
+// TODO(xiaofeng): Define GOOGLE_PROTOBUF_HAS_CONSTEXPR when constexpr is
+// available.
+#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR
+# define UINT128_CONSTEXPR constexpr
+#else
+# define UINT128_CONSTEXPR
+#endif
+
+// An unsigned 128-bit integer type. Thread-compatible.
+class LIBPROTOBUF_EXPORT uint128 {
+ public:
+  UINT128_CONSTEXPR uint128();  // Sets to 0, but don't rely on this behavior.
+  UINT128_CONSTEXPR uint128(uint64 top, uint64 bottom);
+#ifndef SWIG
+  UINT128_CONSTEXPR uint128(int bottom);
+  UINT128_CONSTEXPR uint128(uint32 bottom);   // Top 96 bits = 0
+#endif
+  UINT128_CONSTEXPR uint128(uint64 bottom);   // hi_ = 0
+  UINT128_CONSTEXPR uint128(const uint128_pod &val);
+
+  // Trivial copy constructor, assignment operator and destructor.
+
+  void Initialize(uint64 top, uint64 bottom);
+
+  // Arithmetic operators.
+  uint128& operator+=(const uint128& b);
+  uint128& operator-=(const uint128& b);
+  uint128& operator*=(const uint128& b);
+  // Long division/modulo for uint128.
+  uint128& operator/=(const uint128& b);
+  uint128& operator%=(const uint128& b);
+  uint128 operator++(int);
+  uint128 operator--(int);
+  uint128& operator<<=(int);
+  uint128& operator>>=(int);
+  uint128& operator&=(const uint128& b);
+  uint128& operator|=(const uint128& b);
+  uint128& operator^=(const uint128& b);
+  uint128& operator++();
+  uint128& operator--();
+
+  friend uint64 Uint128Low64(const uint128& v);
+  friend uint64 Uint128High64(const uint128& v);
+
+  // We add "std::" to avoid including all of port.h.
+  LIBPROTOBUF_EXPORT friend std::ostream& operator<<(std::ostream& o,
+                                                     const uint128& b);
+
+ private:
+  static void DivModImpl(uint128 dividend, uint128 divisor,
+                         uint128* quotient_ret, uint128* remainder_ret);
+
+  // Little-endian memory order optimizations can benefit from
+  // having lo_ first, hi_ last.
+  // See util/endian/endian.h and Load128/Store128 for storing a uint128.
+  uint64        lo_;
+  uint64        hi_;
+
+  // Not implemented, just declared for catching automatic type conversions.
+  uint128(uint8);
+  uint128(uint16);
+  uint128(float v);
+  uint128(double v);
+};
+
+// This is a POD form of uint128 which can be used for static variables which
+// need to be operated on as uint128.
+struct uint128_pod {
+  // Note: The ordering of fields is different than 'class uint128' but the
+  // same as its 2-arg constructor.  This enables more obvious initialization
+  // of static instances, which is the primary reason for this struct in the
+  // first place.  This does not seem to defeat any optimizations wrt
+  // operations involving this struct.
+  uint64 hi;
+  uint64 lo;
+};
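+
+// For example (an illustrative sketch; kuint128max below is defined this way
+// in int128.cc):
+//
+//   static const uint128_pod kTwoTo64 = { 1, 0 };  // hi = 1, lo = 0, i.e. 2^64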
+
+LIBPROTOBUF_EXPORT extern const uint128_pod kuint128max;
+
+// Allow uint128 to be logged.
+LIBPROTOBUF_EXPORT extern std::ostream& operator<<(std::ostream& o,
+                                                   const uint128& b);
+
+// Methods to access low and high pieces of 128-bit value.
+// Defined externally from uint128 to facilitate conversion
+// to native 128-bit types when compilers support them.
+inline uint64 Uint128Low64(const uint128& v) { return v.lo_; }
+inline uint64 Uint128High64(const uint128& v) { return v.hi_; }
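+
+// For example (illustrative): for uint128 v(1, 2), Uint128High64(v) == 1 and
+// Uint128Low64(v) == 2, since the two-argument constructor takes (high, low).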
+
+// TODO: perhaps it would be nice to have int128, a signed 128-bit type?
+
+// --------------------------------------------------------------------------
+//                      Implementation details follow
+// --------------------------------------------------------------------------
+inline bool operator==(const uint128& lhs, const uint128& rhs) {
+  return (Uint128Low64(lhs) == Uint128Low64(rhs) &&
+          Uint128High64(lhs) == Uint128High64(rhs));
+}
+inline bool operator!=(const uint128& lhs, const uint128& rhs) {
+  return !(lhs == rhs);
+}
+
+inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
+inline UINT128_CONSTEXPR uint128::uint128(uint64 top, uint64 bottom)
+    : lo_(bottom), hi_(top) {}
+inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
+    : lo_(v.lo), hi_(v.hi) {}
+inline UINT128_CONSTEXPR uint128::uint128(uint64 bottom)
+    : lo_(bottom), hi_(0) {}
+#ifndef SWIG
+inline UINT128_CONSTEXPR uint128::uint128(uint32 bottom)
+    : lo_(bottom), hi_(0) {}
+inline UINT128_CONSTEXPR uint128::uint128(int bottom)
+    : lo_(bottom), hi_(static_cast<int64>((bottom < 0) ? -1 : 0)) {}
+#endif
+
+#undef UINT128_CONSTEXPR
+
+inline void uint128::Initialize(uint64 top, uint64 bottom) {
+  hi_ = top;
+  lo_ = bottom;
+}
+
+// Comparison operators.
+
+#define CMP128(op)                                                \
+inline bool operator op(const uint128& lhs, const uint128& rhs) { \
+  return (Uint128High64(lhs) == Uint128High64(rhs)) ?             \
+      (Uint128Low64(lhs) op Uint128Low64(rhs)) :                  \
+      (Uint128High64(lhs) op Uint128High64(rhs));                 \
+}
+
+CMP128(<)
+CMP128(>)
+CMP128(>=)
+CMP128(<=)
+
+#undef CMP128
+
+// Unary operators
+
+inline uint128 operator-(const uint128& val) {
+  const uint64 hi_flip = ~Uint128High64(val);
+  const uint64 lo_flip = ~Uint128Low64(val);
+  const uint64 lo_add = lo_flip + 1;
+  if (lo_add < lo_flip) {
+    return uint128(hi_flip + 1, lo_add);
+  }
+  return uint128(hi_flip, lo_add);
+}
+
+inline bool operator!(const uint128& val) {
+  return !Uint128High64(val) && !Uint128Low64(val);
+}
+
+// Logical operators.
+
+inline uint128 operator~(const uint128& val) {
+  return uint128(~Uint128High64(val), ~Uint128Low64(val));
+}
+
+#define LOGIC128(op)                                                 \
+inline uint128 operator op(const uint128& lhs, const uint128& rhs) { \
+  return uint128(Uint128High64(lhs) op Uint128High64(rhs),           \
+                 Uint128Low64(lhs) op Uint128Low64(rhs));            \
+}
+
+LOGIC128(|)
+LOGIC128(&)
+LOGIC128(^)
+
+#undef LOGIC128
+
+#define LOGICASSIGN128(op)                                   \
+inline uint128& uint128::operator op(const uint128& other) { \
+  hi_ op other.hi_;                                          \
+  lo_ op other.lo_;                                          \
+  return *this;                                              \
+}
+
+LOGICASSIGN128(|=)
+LOGICASSIGN128(&=)
+LOGICASSIGN128(^=)
+
+#undef LOGICASSIGN128
+
+// Shift operators.
+
+inline uint128 operator<<(const uint128& val, int amount) {
+  // uint64 shifts of >= 64 are undefined, so we will need some special-casing.
+  if (amount < 64) {
+    if (amount == 0) {
+      return val;
+    }
+    uint64 new_hi = (Uint128High64(val) << amount) |
+                    (Uint128Low64(val) >> (64 - amount));
+    uint64 new_lo = Uint128Low64(val) << amount;
+    return uint128(new_hi, new_lo);
+  } else if (amount < 128) {
+    return uint128(Uint128Low64(val) << (amount - 64), 0);
+  } else {
+    return uint128(0, 0);
+  }
+}
+
+inline uint128 operator>>(const uint128& val, int amount) {
+  // uint64 shifts of >= 64 are undefined, so we will need some special-casing.
+  if (amount < 64) {
+    if (amount == 0) {
+      return val;
+    }
+    uint64 new_hi = Uint128High64(val) >> amount;
+    uint64 new_lo = (Uint128Low64(val) >> amount) |
+                    (Uint128High64(val) << (64 - amount));
+    return uint128(new_hi, new_lo);
+  } else if (amount < 128) {
+    return uint128(0, Uint128High64(val) >> (amount - 64));
+  } else {
+    return uint128(0, 0);
+  }
+}
+
+inline uint128& uint128::operator<<=(int amount) {
+  // uint64 shifts of >= 64 are undefined, so we will need some special-casing.
+  if (amount < 64) {
+    if (amount != 0) {
+      hi_ = (hi_ << amount) | (lo_ >> (64 - amount));
+      lo_ = lo_ << amount;
+    }
+  } else if (amount < 128) {
+    hi_ = lo_ << (amount - 64);
+    lo_ = 0;
+  } else {
+    hi_ = 0;
+    lo_ = 0;
+  }
+  return *this;
+}
+
+inline uint128& uint128::operator>>=(int amount) {
+  // uint64 shifts of >= 64 are undefined, so we will need some special-casing.
+  if (amount < 64) {
+    if (amount != 0) {
+      lo_ = (lo_ >> amount) | (hi_ << (64 - amount));
+      hi_ = hi_ >> amount;
+    }
+  } else if (amount < 128) {
+    lo_ = hi_ >> (amount - 64);
+    hi_ = 0;
+  } else {
+    lo_ = 0;
+    hi_ = 0;
+  }
+  return *this;
+}
+
+inline uint128 operator+(const uint128& lhs, const uint128& rhs) {
+  return uint128(lhs) += rhs;
+}
+
+inline uint128 operator-(const uint128& lhs, const uint128& rhs) {
+  return uint128(lhs) -= rhs;
+}
+
+inline uint128 operator*(const uint128& lhs, const uint128& rhs) {
+  return uint128(lhs) *= rhs;
+}
+
+inline uint128 operator/(const uint128& lhs, const uint128& rhs) {
+  return uint128(lhs) /= rhs;
+}
+
+inline uint128 operator%(const uint128& lhs, const uint128& rhs) {
+  return uint128(lhs) %= rhs;
+}
+
+inline uint128& uint128::operator+=(const uint128& b) {
+  hi_ += b.hi_;
+  uint64 lolo = lo_ + b.lo_;
+  if (lolo < lo_)
+    ++hi_;
+  lo_ = lolo;
+  return *this;
+}
+
+inline uint128& uint128::operator-=(const uint128& b) {
+  hi_ -= b.hi_;
+  if (b.lo_ > lo_)
+    --hi_;
+  lo_ -= b.lo_;
+  return *this;
+}
+
+inline uint128& uint128::operator*=(const uint128& b) {
+  uint64 a96 = hi_ >> 32;
+  uint64 a64 = hi_ & 0xffffffffu;
+  uint64 a32 = lo_ >> 32;
+  uint64 a00 = lo_ & 0xffffffffu;
+  uint64 b96 = b.hi_ >> 32;
+  uint64 b64 = b.hi_ & 0xffffffffu;
+  uint64 b32 = b.lo_ >> 32;
+  uint64 b00 = b.lo_ & 0xffffffffu;
+  // multiply [a96 .. a00] x [b96 .. b00]
+  // terms higher than c96 disappear off the high side
+  // terms c96 and c64 are safe to ignore carry bit
+  uint64 c96 = a96 * b00 + a64 * b32 + a32 * b64 + a00 * b96;
+  uint64 c64 = a64 * b00 + a32 * b32 + a00 * b64;
+  this->hi_ = (c96 << 32) + c64;
+  this->lo_ = 0;
+  // add terms after this one at a time to capture carry
+  *this += uint128(a32 * b00) << 32;
+  *this += uint128(a00 * b32) << 32;
+  *this += a00 * b00;
+  return *this;
+}
+
+inline uint128 uint128::operator++(int) {
+  uint128 tmp(*this);
+  *this += 1;
+  return tmp;
+}
+
+inline uint128 uint128::operator--(int) {
+  uint128 tmp(*this);
+  *this -= 1;
+  return tmp;
+}
+
+inline uint128& uint128::operator++() {
+  *this += 1;
+  return *this;
+}
+
+inline uint128& uint128::operator--() {
+  *this -= 1;
+  return *this;
+}
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_INT128_H_
diff --git a/src/google/protobuf/stubs/int128_unittest.cc b/src/google/protobuf/stubs/int128_unittest.cc
new file mode 100644
index 0000000..5d33292
--- /dev/null
+++ b/src/google/protobuf/stubs/int128_unittest.cc
@@ -0,0 +1,513 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/int128.h>
+
+#include <algorithm>
+#include <sstream>
+#include <utility>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+
+TEST(Int128, AllTests) {
+  uint128 zero(0);
+  uint128 one(1);
+  uint128 one_2arg(0, 1);
+  uint128 two(0, 2);
+  uint128 three(0, 3);
+  uint128 big(2000, 2);
+  uint128 big_minus_one(2000, 1);
+  uint128 bigger(2001, 1);
+  uint128 biggest(kuint128max);
+  uint128 high_low(1, 0);
+  uint128 low_high(0, kuint64max);
+  EXPECT_LT(one, two);
+  EXPECT_GT(two, one);
+  EXPECT_LT(one, big);
+  EXPECT_LT(one, big);
+  EXPECT_EQ(one, one_2arg);
+  EXPECT_NE(one, two);
+  EXPECT_GT(big, one);
+  EXPECT_GE(big, two);
+  EXPECT_GE(big, big_minus_one);
+  EXPECT_GT(big, big_minus_one);
+  EXPECT_LT(big_minus_one, big);
+  EXPECT_LE(big_minus_one, big);
+  EXPECT_NE(big_minus_one, big);
+  EXPECT_LT(big, biggest);
+  EXPECT_LE(big, biggest);
+  EXPECT_GT(biggest, big);
+  EXPECT_GE(biggest, big);
+  EXPECT_EQ(big, ~~big);
+  EXPECT_EQ(one, one | one);
+  EXPECT_EQ(big, big | big);
+  EXPECT_EQ(one, one | zero);
+  EXPECT_EQ(one, one & one);
+  EXPECT_EQ(big, big & big);
+  EXPECT_EQ(zero, one & zero);
+  EXPECT_EQ(zero, big & ~big);
+  EXPECT_EQ(zero, one ^ one);
+  EXPECT_EQ(zero, big ^ big);
+  EXPECT_EQ(one, one ^ zero);
+
+  // Shift operators.
+  EXPECT_EQ(big, big << 0);
+  EXPECT_EQ(big, big >> 0);
+  EXPECT_GT(big << 1, big);
+  EXPECT_LT(big >> 1, big);
+  EXPECT_EQ(big, (big << 10) >> 10);
+  EXPECT_EQ(big, (big >> 1) << 1);
+  EXPECT_EQ(one, (one << 80) >> 80);
+  EXPECT_EQ(zero, (one >> 80) << 80);
+  EXPECT_EQ(zero, big >> 128);
+  EXPECT_EQ(zero, big << 128);
+
+  // Shift assignments.
+  uint128 big_copy = big;
+  EXPECT_EQ(big << 0, big_copy <<= 0);
+  big_copy = big;
+  EXPECT_EQ(big >> 0, big_copy >>= 0);
+  big_copy = big;
+  EXPECT_EQ(big << 1, big_copy <<= 1);
+  big_copy = big;
+  EXPECT_EQ(big >> 1, big_copy >>= 1);
+  big_copy = big;
+  EXPECT_EQ(big << 10, big_copy <<= 10);
+  big_copy = big;
+  EXPECT_EQ(big >> 10, big_copy >>= 10);
+  big_copy = big;
+  EXPECT_EQ(big << 64, big_copy <<= 64);
+  big_copy = big;
+  EXPECT_EQ(big >> 64, big_copy >>= 64);
+  big_copy = big;
+  EXPECT_EQ(big << 73, big_copy <<= 73);
+  big_copy = big;
+  EXPECT_EQ(big >> 73, big_copy >>= 73);
+  big_copy = big;
+  EXPECT_EQ(big << 128, big_copy <<= 128);
+  big_copy = big;
+  EXPECT_EQ(big >> 128, big_copy >>= 128);
+
+  EXPECT_EQ(Uint128High64(biggest), kuint64max);
+  EXPECT_EQ(Uint128Low64(biggest), kuint64max);
+  EXPECT_EQ(zero + one, one);
+  EXPECT_EQ(one + one, two);
+  EXPECT_EQ(big_minus_one + one, big);
+  EXPECT_EQ(one - one, zero);
+  EXPECT_EQ(one - zero, one);
+  EXPECT_EQ(zero - one, biggest);
+  EXPECT_EQ(big - big, zero);
+  EXPECT_EQ(big - one, big_minus_one);
+  EXPECT_EQ(big + kuint64max, bigger);
+  EXPECT_EQ(biggest + 1, zero);
+  EXPECT_EQ(zero - 1, biggest);
+  EXPECT_EQ(high_low - one, low_high);
+  EXPECT_EQ(low_high + one, high_low);
+  EXPECT_EQ(Uint128High64((uint128(1) << 64) - 1), 0);
+  EXPECT_EQ(Uint128Low64((uint128(1) << 64) - 1), kuint64max);
+  EXPECT_TRUE(!!one);
+  EXPECT_TRUE(!!high_low);
+  EXPECT_FALSE(!!zero);
+  EXPECT_FALSE(!one);
+  EXPECT_FALSE(!high_low);
+  EXPECT_TRUE(!zero);
+  EXPECT_TRUE(zero == 0);
+  EXPECT_FALSE(zero != 0);
+  EXPECT_FALSE(one == 0);
+  EXPECT_TRUE(one != 0);
+
+  uint128 test = zero;
+  EXPECT_EQ(++test, one);
+  EXPECT_EQ(test, one);
+  EXPECT_EQ(test++, one);
+  EXPECT_EQ(test, two);
+  EXPECT_EQ(test -= 2, zero);
+  EXPECT_EQ(test, zero);
+  EXPECT_EQ(test += 2, two);
+  EXPECT_EQ(test, two);
+  EXPECT_EQ(--test, one);
+  EXPECT_EQ(test, one);
+  EXPECT_EQ(test--, one);
+  EXPECT_EQ(test, zero);
+  EXPECT_EQ(test |= three, three);
+  EXPECT_EQ(test &= one, one);
+  EXPECT_EQ(test ^= three, two);
+  EXPECT_EQ(test >>= 1, one);
+  EXPECT_EQ(test <<= 1, two);
+
+  EXPECT_EQ(big, -(-big));
+  EXPECT_EQ(two, -((-one) - 1));
+  EXPECT_EQ(kuint128max, -one);
+  EXPECT_EQ(zero, -zero);
+
+  GOOGLE_LOG(INFO) << one;
+  GOOGLE_LOG(INFO) << big_minus_one;
+}
+
+TEST(Int128, PodTests) {
+  uint128_pod pod = { 12345, 67890 };
+  uint128 from_pod(pod);
+  EXPECT_EQ(12345, Uint128High64(from_pod));
+  EXPECT_EQ(67890, Uint128Low64(from_pod));
+
+  uint128 zero(0);
+  uint128_pod zero_pod = {0, 0};
+  uint128 one(1);
+  uint128_pod one_pod = {0, 1};
+  uint128 two(2);
+  uint128_pod two_pod = {0, 2};
+  uint128 three(3);
+  uint128_pod three_pod = {0, 3};
+  uint128 big(1, 0);
+  uint128_pod big_pod = {1, 0};
+
+  EXPECT_EQ(zero, zero_pod);
+  EXPECT_EQ(zero_pod, zero);
+  EXPECT_EQ(zero_pod, zero_pod);
+  EXPECT_EQ(one, one_pod);
+  EXPECT_EQ(one_pod, one);
+  EXPECT_EQ(one_pod, one_pod);
+  EXPECT_EQ(two, two_pod);
+  EXPECT_EQ(two_pod, two);
+  EXPECT_EQ(two_pod, two_pod);
+
+  EXPECT_NE(one, two_pod);
+  EXPECT_NE(one_pod, two);
+  EXPECT_NE(one_pod, two_pod);
+
+  EXPECT_LT(one, two_pod);
+  EXPECT_LT(one_pod, two);
+  EXPECT_LT(one_pod, two_pod);
+  EXPECT_LE(one, one_pod);
+  EXPECT_LE(one_pod, one);
+  EXPECT_LE(one_pod, one_pod);
+  EXPECT_LE(one, two_pod);
+  EXPECT_LE(one_pod, two);
+  EXPECT_LE(one_pod, two_pod);
+
+  EXPECT_GT(two, one_pod);
+  EXPECT_GT(two_pod, one);
+  EXPECT_GT(two_pod, one_pod);
+  EXPECT_GE(two, two_pod);
+  EXPECT_GE(two_pod, two);
+  EXPECT_GE(two_pod, two_pod);
+  EXPECT_GE(two, one_pod);
+  EXPECT_GE(two_pod, one);
+  EXPECT_GE(two_pod, one_pod);
+
+  EXPECT_EQ(three, one | two_pod);
+  EXPECT_EQ(three, one_pod | two);
+  EXPECT_EQ(three, one_pod | two_pod);
+  EXPECT_EQ(one, three & one_pod);
+  EXPECT_EQ(one, three_pod & one);
+  EXPECT_EQ(one, three_pod & one_pod);
+  EXPECT_EQ(two, three ^ one_pod);
+  EXPECT_EQ(two, three_pod ^ one);
+  EXPECT_EQ(two, three_pod ^ one_pod);
+  EXPECT_EQ(two, three & (~one));
+  EXPECT_EQ(three, ~~three);
+
+  EXPECT_EQ(two, two_pod << 0);
+  EXPECT_EQ(two, one_pod << 1);
+  EXPECT_EQ(big, one_pod << 64);
+  EXPECT_EQ(zero, one_pod << 128);
+  EXPECT_EQ(two, two_pod >> 0);
+  EXPECT_EQ(one, two_pod >> 1);
+  EXPECT_EQ(one, big_pod >> 64);
+
+  EXPECT_EQ(one, zero + one_pod);
+  EXPECT_EQ(one, zero_pod + one);
+  EXPECT_EQ(one, zero_pod + one_pod);
+  EXPECT_EQ(one, two - one_pod);
+  EXPECT_EQ(one, two_pod - one);
+  EXPECT_EQ(one, two_pod - one_pod);
+}
+
+TEST(Int128, OperatorAssignReturnRef) {
+  uint128 v(1);
+  (v += 4) -= 3;
+  EXPECT_EQ(2, v);
+}
+
+TEST(Int128, Multiply) {
+  uint128 a, b, c;
+
+  // Zero test.
+  a = 0;
+  b = 0;
+  c = a * b;
+  EXPECT_EQ(0, c);
+
+  // Max carries.
+  a = uint128(0) - 1;
+  b = uint128(0) - 1;
+  c = a * b;
+  EXPECT_EQ(1, c);
+
+  // Self-operation with max carries.
+  c = uint128(0) - 1;
+  c *= c;
+  EXPECT_EQ(1, c);
+
+  // 1-bit x 1-bit.
+  for (int i = 0; i < 64; ++i) {
+    for (int j = 0; j < 64; ++j) {
+      a = uint128(1) << i;
+      b = uint128(1) << j;
+      c = a * b;
+      EXPECT_EQ(uint128(1) << (i+j), c);
+    }
+  }
+
+  // Verified with dc.
+  a = uint128(GOOGLE_ULONGLONG(0xffffeeeeddddcccc),
+              GOOGLE_ULONGLONG(0xbbbbaaaa99998888));
+  b = uint128(GOOGLE_ULONGLONG(0x7777666655554444),
+              GOOGLE_ULONGLONG(0x3333222211110000));
+  c = a * b;
+  EXPECT_EQ(uint128(GOOGLE_ULONGLONG(0x530EDA741C71D4C3),
+                    GOOGLE_ULONGLONG(0xBF25975319080000)), c);
+  EXPECT_EQ(0, c - b * a);
+  EXPECT_EQ(a*a - b*b, (a+b) * (a-b));
+
+  // Verified with dc.
+  a = uint128(GOOGLE_ULONGLONG(0x0123456789abcdef),
+              GOOGLE_ULONGLONG(0xfedcba9876543210));
+  b = uint128(GOOGLE_ULONGLONG(0x02468ace13579bdf),
+              GOOGLE_ULONGLONG(0xfdb97531eca86420));
+  c = a * b;
+  EXPECT_EQ(uint128(GOOGLE_ULONGLONG(0x97a87f4f261ba3f2),
+                    GOOGLE_ULONGLONG(0x342d0bbf48948200)), c);
+  EXPECT_EQ(0, c - b * a);
+  EXPECT_EQ(a*a - b*b, (a+b) * (a-b));
+}
+
+TEST(Int128, AliasTests) {
+  uint128 x1(1, 2);
+  uint128 x2(2, 4);
+  x1 += x1;
+  EXPECT_EQ(x2, x1);
+
+  uint128 x3(1, static_cast<uint64>(1) << 63);
+  uint128 x4(3, 0);
+  x3 += x3;
+  EXPECT_EQ(x4, x3);
+}
+
+#ifdef PROTOBUF_HAS_DEATH_TEST
+TEST(Int128, DivideByZeroCheckFails) {
+  uint128 a = 0;
+  uint128 b = 0;
+  EXPECT_DEATH(a / b, "Division or mod by zero:");
+  a = 123;
+  EXPECT_DEATH(a / b, "Division or mod by zero:");
+}
+
+TEST(Int128, ModByZeroCheckFails) {
+  uint128 a = 0;
+  uint128 b = 0;
+  EXPECT_DEATH(a % b, "Division or mod by zero:");
+  a = 123;
+  EXPECT_DEATH(a % b, "Division or mod by zero:");
+}
+#endif  // PROTOBUF_HAS_DEATH_TEST
+
+TEST(Int128, DivideAndMod) {
+  // a := q * b + r
+  uint128 a, b, q, r;
+
+  // Zero test.
+  a = 0;
+  b = 123;
+  q = a / b;
+  r = a % b;
+  EXPECT_EQ(0, q);
+  EXPECT_EQ(0, r);
+
+  a = uint128(GOOGLE_ULONGLONG(0x530eda741c71d4c3),
+              GOOGLE_ULONGLONG(0xbf25975319080000));
+  q = uint128(GOOGLE_ULONGLONG(0x4de2cab081),
+              GOOGLE_ULONGLONG(0x14c34ab4676e4bab));
+  b = uint128(0x1110001);
+  r = uint128(0x3eb455);
+  ASSERT_EQ(a, q * b + r);  // Sanity-check.
+
+  uint128 result_q, result_r;
+  result_q = a / b;
+  result_r = a % b;
+  EXPECT_EQ(q, result_q);
+  EXPECT_EQ(r, result_r);
+
+  // Try the other way around.
+  swap(q, b);
+  result_q = a / b;
+  result_r = a % b;
+  EXPECT_EQ(q, result_q);
+  EXPECT_EQ(r, result_r);
+  // Restore.
+  swap(b, q);
+
+  // Dividend < divisor; result should be q:0 r:<dividend>.
+  swap(a, b);
+  result_q = a / b;
+  result_r = a % b;
+  EXPECT_EQ(0, result_q);
+  EXPECT_EQ(a, result_r);
+  // Try the other way around.
+  swap(a, q);
+  result_q = a / b;
+  result_r = a % b;
+  EXPECT_EQ(0, result_q);
+  EXPECT_EQ(a, result_r);
+  // Restore.
+  swap(q, a);
+  swap(b, a);
+
+  // Try a large remainder.
+  b = a / 2 + 1;
+  uint128 expected_r(GOOGLE_ULONGLONG(0x29876d3a0e38ea61),
+                     GOOGLE_ULONGLONG(0xdf92cba98c83ffff));
+  // Sanity checks.
+  ASSERT_EQ(a / 2 - 1, expected_r);
+  ASSERT_EQ(a, b + expected_r);
+  result_q = a / b;
+  result_r = a % b;
+  EXPECT_EQ(1, result_q);
+  EXPECT_EQ(expected_r, result_r);
+}
+
+static uint64 RandomUint64() {
+  uint64 v1 = rand();
+  uint64 v2 = rand();
+  uint64 v3 = rand();
+  return v1 * v2 + v3;
+}
+
+TEST(Int128, DivideAndModRandomInputs) {
+  const int kNumIters = 1 << 18;
+  for (int i = 0; i < kNumIters; ++i) {
+    const uint128 a(RandomUint64(), RandomUint64());
+    const uint128 b(RandomUint64(), RandomUint64());
+    if (b == 0) {
+      continue;  // Avoid a div-by-zero.
+    }
+    const uint128 q = a / b;
+    const uint128 r = a % b;
+    ASSERT_EQ(a, b * q + r);
+  }
+}
+
+#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR
+TEST(Int128, ConstexprTest) {
+  constexpr uint128 zero;
+  constexpr uint128 one = 1;
+  constexpr uint128_pod pod = {2, 3};
+  constexpr uint128 from_pod = pod;
+  constexpr uint128 minus_two = -2;
+  EXPECT_EQ(one, uint128(1));
+  EXPECT_EQ(from_pod, uint128(2, 3));
+  EXPECT_EQ(minus_two, uint128(-1ULL, -2ULL));
+}
+
+TEST(Int128, Traits) {
+  EXPECT_TRUE(std::is_trivially_copy_constructible<uint128>::value);
+  EXPECT_TRUE(std::is_trivially_copy_assignable<uint128>::value);
+  EXPECT_TRUE(std::is_trivially_destructible<uint128>::value);
+}
+#endif  // GOOGLE_PROTOBUF_HAS_CONSTEXPR
+
+TEST(Int128, OStream) {
+  struct {
+    uint128 val;
+    std::ios_base::fmtflags flags;
+    std::streamsize width;
+    char fill;
+    const char* rep;
+  } cases[] = {
+        // zero with different bases
+        {uint128(0), std::ios::dec, 0, '_', "0"},
+        {uint128(0), std::ios::oct, 0, '_', "0"},
+        {uint128(0), std::ios::hex, 0, '_', "0"},
+        // crossover between lo_ and hi_
+        {uint128(0, -1), std::ios::dec, 0, '_', "18446744073709551615"},
+        {uint128(0, -1), std::ios::oct, 0, '_', "1777777777777777777777"},
+        {uint128(0, -1), std::ios::hex, 0, '_', "ffffffffffffffff"},
+        {uint128(1, 0), std::ios::dec, 0, '_', "18446744073709551616"},
+        {uint128(1, 0), std::ios::oct, 0, '_', "2000000000000000000000"},
+        {uint128(1, 0), std::ios::hex, 0, '_', "10000000000000000"},
+        // just the top bit
+        {uint128(GOOGLE_ULONGLONG(0x8000000000000000), 0), std::ios::dec, 0, '_',
+         "170141183460469231731687303715884105728"},
+        {uint128(GOOGLE_ULONGLONG(0x8000000000000000), 0), std::ios::oct, 0, '_',
+         "2000000000000000000000000000000000000000000"},
+        {uint128(GOOGLE_ULONGLONG(0x8000000000000000), 0), std::ios::hex, 0, '_',
+         "80000000000000000000000000000000"},
+        // maximum uint128 value
+        {uint128(-1, -1), std::ios::dec, 0, '_',
+         "340282366920938463463374607431768211455"},
+        {uint128(-1, -1), std::ios::oct, 0, '_',
+         "3777777777777777777777777777777777777777777"},
+        {uint128(-1, -1), std::ios::hex, 0, '_',
+         "ffffffffffffffffffffffffffffffff"},
+        // uppercase
+        {uint128(-1, -1), std::ios::hex | std::ios::uppercase, 0, '_',
+         "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"},
+        // showbase
+        {uint128(1), std::ios::dec | std::ios::showbase, 0, '_', "1"},
+        {uint128(1), std::ios::oct | std::ios::showbase, 0, '_', "01"},
+        {uint128(1), std::ios::hex | std::ios::showbase, 0, '_', "0x1"},
+        // showbase does nothing on zero
+        {uint128(0), std::ios::dec | std::ios::showbase, 0, '_', "0"},
+        {uint128(0), std::ios::oct | std::ios::showbase, 0, '_', "0"},
+        {uint128(0), std::ios::hex | std::ios::showbase, 0, '_', "0"},
+        // showpos does nothing on unsigned types
+        {uint128(1), std::ios::dec | std::ios::showpos, 0, '_', "1"},
+        // padding
+        {uint128(9), std::ios::dec, 6, '_', "_____9"},
+        {uint128(12345), std::ios::dec, 6, '_', "_12345"},
+        // left adjustment
+        {uint128(9), std::ios::dec | std::ios::left, 6, '_', "9_____"},
+        {uint128(12345), std::ios::dec | std::ios::left, 6, '_', "12345_"},
+  };
+  for (size_t i = 0; i < GOOGLE_ARRAYSIZE(cases); ++i) {
+    ostringstream os;
+    os.flags(cases[i].flags);
+    os.width(cases[i].width);
+    os.fill(cases[i].fill);
+    os << cases[i].val;
+    EXPECT_EQ(cases[i].rep, os.str());
+  }
+}
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/logging.h b/src/google/protobuf/stubs/logging.h
new file mode 100644
index 0000000..3108db8
--- /dev/null
+++ b/src/google/protobuf/stubs/logging.h
@@ -0,0 +1,237 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_LOGGING_H_
+#define GOOGLE_PROTOBUF_STUBS_LOGGING_H_
+
+#include <google/protobuf/stubs/macros.h>
+#include <google/protobuf/stubs/port.h>
+
+// ===================================================================
+// emulates google3/base/logging.h
+
+namespace google {
+namespace protobuf {
+
+enum LogLevel {
+  LOGLEVEL_INFO,     // Informational.  This is never actually used by
+                     // libprotobuf.
+  LOGLEVEL_WARNING,  // Warns about issues that, although not technically a
+                     // problem now, could cause problems in the future.  For
+                     // example, a warning will be printed when parsing a
+                     // message that is near the message size limit.
+  LOGLEVEL_ERROR,    // An error occurred which should never happen during
+                     // normal use.
+  LOGLEVEL_FATAL,    // An error occurred from which the library cannot
+                     // recover.  This usually indicates a programming error
+                     // in the code which calls the library, especially when
+                     // compiled in debug mode.
+
+#ifdef NDEBUG
+  LOGLEVEL_DFATAL = LOGLEVEL_ERROR
+#else
+  LOGLEVEL_DFATAL = LOGLEVEL_FATAL
+#endif
+};
+
+class StringPiece;
+namespace util {
+class Status;
+}
+class uint128;
+namespace internal {
+
+class LogFinisher;
+
+class LIBPROTOBUF_EXPORT LogMessage {
+ public:
+  LogMessage(LogLevel level, const char* filename, int line);
+  ~LogMessage();
+
+  LogMessage& operator<<(const std::string& value);
+  LogMessage& operator<<(const char* value);
+  LogMessage& operator<<(char value);
+  LogMessage& operator<<(int value);
+  LogMessage& operator<<(uint value);
+  LogMessage& operator<<(long value);
+  LogMessage& operator<<(unsigned long value);
+  LogMessage& operator<<(long long value);
+  LogMessage& operator<<(unsigned long long value);
+  LogMessage& operator<<(double value);
+  LogMessage& operator<<(void* value);
+  LogMessage& operator<<(const StringPiece& value);
+  LogMessage& operator<<(const ::google::protobuf::util::Status& status);
+  LogMessage& operator<<(const uint128& value);
+
+ private:
+  friend class LogFinisher;
+  void Finish();
+
+  LogLevel level_;
+  const char* filename_;
+  int line_;
+  std::string message_;
+};
+
+// Used to make the entire "LOG(BLAH) << etc." expression have a void return
+// type and print a newline after each message.
+class LIBPROTOBUF_EXPORT LogFinisher {
+ public:
+  void operator=(LogMessage& other);
+};
+
+template<typename T>
+bool IsOk(T status) { return status.ok(); }
+template<>
+inline bool IsOk(bool status) { return status; }
+
+}  // namespace internal
+
+// Undef everything in case we're being mixed with some other Google library
+// which already defined them itself.  Presumably all Google libraries will
+// support the same syntax for these so it should not be a big deal if they
+// end up using our definitions instead.
+#undef GOOGLE_LOG
+#undef GOOGLE_LOG_IF
+
+#undef GOOGLE_CHECK
+#undef GOOGLE_CHECK_OK
+#undef GOOGLE_CHECK_EQ
+#undef GOOGLE_CHECK_NE
+#undef GOOGLE_CHECK_LT
+#undef GOOGLE_CHECK_LE
+#undef GOOGLE_CHECK_GT
+#undef GOOGLE_CHECK_GE
+#undef GOOGLE_CHECK_NOTNULL
+
+#undef GOOGLE_DLOG
+#undef GOOGLE_DCHECK
+#undef GOOGLE_DCHECK_OK
+#undef GOOGLE_DCHECK_EQ
+#undef GOOGLE_DCHECK_NE
+#undef GOOGLE_DCHECK_LT
+#undef GOOGLE_DCHECK_LE
+#undef GOOGLE_DCHECK_GT
+#undef GOOGLE_DCHECK_GE
+
+#define GOOGLE_LOG(LEVEL)                                                 \
+  ::google::protobuf::internal::LogFinisher() =                           \
+    ::google::protobuf::internal::LogMessage(                             \
+      ::google::protobuf::LOGLEVEL_##LEVEL, __FILE__, __LINE__)
+#define GOOGLE_LOG_IF(LEVEL, CONDITION) \
+  !(CONDITION) ? (void)0 : GOOGLE_LOG(LEVEL)
+
+#define GOOGLE_CHECK(EXPRESSION) \
+  GOOGLE_LOG_IF(FATAL, !(EXPRESSION)) << "CHECK failed: " #EXPRESSION ": "
+#define GOOGLE_CHECK_OK(A) GOOGLE_CHECK(::google::protobuf::internal::IsOk(A))
+#define GOOGLE_CHECK_EQ(A, B) GOOGLE_CHECK((A) == (B))
+#define GOOGLE_CHECK_NE(A, B) GOOGLE_CHECK((A) != (B))
+#define GOOGLE_CHECK_LT(A, B) GOOGLE_CHECK((A) <  (B))
+#define GOOGLE_CHECK_LE(A, B) GOOGLE_CHECK((A) <= (B))
+#define GOOGLE_CHECK_GT(A, B) GOOGLE_CHECK((A) >  (B))
+#define GOOGLE_CHECK_GE(A, B) GOOGLE_CHECK((A) >= (B))
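+
+// Illustrative usage only (not part of the original header); as in
+// google3/base/logging.h, additional context can be streamed onto a check:
+//
+//   GOOGLE_CHECK_GE(field_number, 1) << "Invalid field number.";
+//   GOOGLE_LOG(WARNING) << "Approaching the message size limit.";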
+
+namespace internal {
+template<typename T>
+T* CheckNotNull(const char* /* file */, int /* line */,
+                const char* name, T* val) {
+  if (val == NULL) {
+    GOOGLE_LOG(FATAL) << name;
+  }
+  return val;
+}
+}  // namespace internal
+#define GOOGLE_CHECK_NOTNULL(A) \
+  ::google::protobuf::internal::CheckNotNull(\
+      __FILE__, __LINE__, "'" #A "' must not be NULL", (A))
+
+#ifdef NDEBUG
+
+#define GOOGLE_DLOG GOOGLE_LOG_IF(INFO, false)
+
+#define GOOGLE_DCHECK(EXPRESSION) while(false) GOOGLE_CHECK(EXPRESSION)
+#define GOOGLE_DCHECK_OK(E) GOOGLE_DCHECK(::google::protobuf::internal::IsOk(E))
+#define GOOGLE_DCHECK_EQ(A, B) GOOGLE_DCHECK((A) == (B))
+#define GOOGLE_DCHECK_NE(A, B) GOOGLE_DCHECK((A) != (B))
+#define GOOGLE_DCHECK_LT(A, B) GOOGLE_DCHECK((A) <  (B))
+#define GOOGLE_DCHECK_LE(A, B) GOOGLE_DCHECK((A) <= (B))
+#define GOOGLE_DCHECK_GT(A, B) GOOGLE_DCHECK((A) >  (B))
+#define GOOGLE_DCHECK_GE(A, B) GOOGLE_DCHECK((A) >= (B))
+
+#else  // NDEBUG
+
+#define GOOGLE_DLOG GOOGLE_LOG
+
+#define GOOGLE_DCHECK    GOOGLE_CHECK
+#define GOOGLE_DCHECK_OK GOOGLE_CHECK_OK
+#define GOOGLE_DCHECK_EQ GOOGLE_CHECK_EQ
+#define GOOGLE_DCHECK_NE GOOGLE_CHECK_NE
+#define GOOGLE_DCHECK_LT GOOGLE_CHECK_LT
+#define GOOGLE_DCHECK_LE GOOGLE_CHECK_LE
+#define GOOGLE_DCHECK_GT GOOGLE_CHECK_GT
+#define GOOGLE_DCHECK_GE GOOGLE_CHECK_GE
+
+#endif  // !NDEBUG
+
+typedef void LogHandler(LogLevel level, const char* filename, int line,
+                        const std::string& message);
+
+// The protobuf library sometimes writes warning and error messages to
+// stderr.  These messages are primarily useful for developers, but may
+// also help end users figure out a problem.  If you would prefer that
+// these messages be sent somewhere other than stderr, call SetLogHandler()
+// to set your own handler.  This returns the old handler.  Set the handler
+// to NULL to ignore log messages (but see also LogSilencer, below).
+//
+// Obviously, SetLogHandler is not thread-safe.  You should only call it
+// at initialization time, and probably not from library code.  If you
+// simply want to suppress log messages temporarily (e.g. because you
+// have some code that tends to trigger them frequently and you know
+// the warnings are not important to you), use the LogSilencer class
+// below.
+LIBPROTOBUF_EXPORT LogHandler* SetLogHandler(LogHandler* new_func);
+
+// Create a LogSilencer if you want to temporarily suppress all log
+// messages.  As long as any LogSilencer objects exist, non-fatal
+// log messages will be discarded (the current LogHandler will *not*
+// be called).  Constructing a LogSilencer is thread-safe.  You may
+// accidentally suppress log messages occurring in another thread, but
+// since messages are generally for debugging purposes only, this isn't
+// a big deal.  If you want to intercept log messages, use SetLogHandler().
+class LIBPROTOBUF_EXPORT LogSilencer {
+ public:
+  LogSilencer();
+  ~LogSilencer();
+};
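+
+// Illustrative usage only (a hedged sketch, not part of the original header):
+//
+//   void MyHandler(LogLevel level, const char* filename, int line,
+//                  const std::string& message) {
+//     fprintf(stderr, "[%s:%d] %s\n", filename, line, message.c_str());
+//   }
+//
+//   LogHandler* old_handler = SetLogHandler(&MyHandler);
+//   {
+//     LogSilencer silencer;  // Non-fatal messages are dropped in this scope.
+//   }
+//   SetLogHandler(old_handler);  // Restore the previous handler.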
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_LOGGING_H_
diff --git a/src/google/protobuf/stubs/macros.h b/src/google/protobuf/stubs/macros.h
new file mode 100644
index 0000000..0e9a9ec
--- /dev/null
+++ b/src/google/protobuf/stubs/macros.h
@@ -0,0 +1,168 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_MACROS_H__
+#define GOOGLE_PROTOBUF_MACROS_H__
+
+#include <google/protobuf/stubs/port.h>
+
+namespace google {
+namespace protobuf {
+
+#undef GOOGLE_DISALLOW_EVIL_CONSTRUCTORS
+#define GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TypeName)    \
+  TypeName(const TypeName&);                           \
+  void operator=(const TypeName&)
+
+#undef GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS
+#define GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+  TypeName();                                           \
+  TypeName(const TypeName&);                            \
+  void operator=(const TypeName&)
+
+// ===================================================================
+// from google3/base/basictypes.h
+
+// The GOOGLE_ARRAYSIZE(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example.
+//
+// GOOGLE_ARRAYSIZE catches a few type errors.  If you see a compiler error
+//
+//   "warning: division by zero in ..."
+//
+// when using GOOGLE_ARRAYSIZE, you are (wrongfully) giving it a pointer.
+// You should only use GOOGLE_ARRAYSIZE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element).  If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array.  Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size.  Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+//
+// Kudos to Jorg Brown for this simple and elegant implementation.
+
+#undef GOOGLE_ARRAYSIZE
+#define GOOGLE_ARRAYSIZE(a) \
+  ((sizeof(a) / sizeof(*(a))) / \
+   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
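+
+// Example (illustrative only; names are hypothetical):
+//
+//   static const int kPrimes[] = {2, 3, 5, 7};
+//   const int num_primes = GOOGLE_ARRAYSIZE(kPrimes);   // == 4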
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+namespace internal {
+
+template <bool>
+struct CompileAssert {
+};
+
+}  // namespace internal
+
+#undef GOOGLE_COMPILE_ASSERT
+#if __cplusplus >= 201103L
+#define GOOGLE_COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+#else
+#define GOOGLE_COMPILE_ASSERT(expr, msg) \
+  ::google::protobuf::internal::CompileAssert<(bool(expr))> \
+          msg[bool(expr) ? 1 : -1]; \
+  (void)msg
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+//                               // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     COMPILE_ASSERT(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
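+//
+// For reference, on a pre-C++11 compiler an invocation such as
+//
+//     GOOGLE_COMPILE_ASSERT(sizeof(int) >= 4, int_too_small);
+//
+// expands (roughly) to
+//
+//     ::google::protobuf::internal::CompileAssert<(bool(sizeof(int) >= 4))>
+//         int_too_small[bool(sizeof(int) >= 4) ? 1 : -1];
+//     (void)int_too_small;
+//
+// so a false expression produces an array of size -1 and a compile error.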
+#endif  // __cplusplus >= 201103L
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_MACROS_H__
diff --git a/src/google/protobuf/stubs/map_util.h b/src/google/protobuf/stubs/map_util.h
new file mode 100644
index 0000000..4cccbbe
--- /dev/null
+++ b/src/google/protobuf/stubs/map_util.h
@@ -0,0 +1,769 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/util/gtl/map_util.h
+// Author: Anton Carver
+
+#ifndef GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__
+#define GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__
+
+#include <stddef.h>
+#include <iterator>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+// Local implementation of RemoveConst to avoid including base/type_traits.h.
+template <class T> struct RemoveConst { typedef T type; };
+template <class T> struct RemoveConst<const T> : RemoveConst<T> {};
+}  // namespace internal
+
+//
+// Find*()
+//
+
+// Returns a const reference to the value associated with the given key if it
+// exists. Crashes otherwise.
+//
+// This is intended as a replacement for operator[] as an rvalue (for reading)
+// when the key is guaranteed to exist.
+//
+// operator[] for lookup is discouraged for several reasons:
+//  * It has a side-effect of inserting missing keys
+//  * It is not thread-safe (even when it is not inserting, it can still
+//      choose to resize the underlying storage)
+//  * It invalidates iterators (when it chooses to resize)
+//  * It default constructs a value object even if it doesn't need to
+//
+// This version assumes the key is printable, and includes it in the fatal log
+// message.
+template <class Collection>
+const typename Collection::value_type::second_type&
+FindOrDie(const Collection& collection,
+          const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  GOOGLE_CHECK(it != collection.end()) << "Map key not found: " << key;
+  return it->second;
+}
+
+// Same as above, but returns a non-const reference.
+template <class Collection>
+typename Collection::value_type::second_type&
+FindOrDie(Collection& collection,  // NOLINT
+          const typename Collection::value_type::first_type& key) {
+  typename Collection::iterator it = collection.find(key);
+  GOOGLE_CHECK(it != collection.end()) << "Map key not found: " << key;
+  return it->second;
+}
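+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> ages;
+//   ages["alice"] = 30;
+//   int age = FindOrDie(ages, "alice");   // age == 30
+//   FindOrDie(ages, "bob");               // GOOGLE_CHECK failure:
+//                                         // "Map key not found: bob"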
+
+// Same as FindOrDie above, but doesn't log the key on failure.
+template <class Collection>
+const typename Collection::value_type::second_type&
+FindOrDieNoPrint(const Collection& collection,
+                 const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  GOOGLE_CHECK(it != collection.end()) << "Map key not found";
+  return it->second;
+}
+
+// Same as above, but returns a non-const reference.
+template <class Collection>
+typename Collection::value_type::second_type&
+FindOrDieNoPrint(Collection& collection,  // NOLINT
+                 const typename Collection::value_type::first_type& key) {
+  typename Collection::iterator it = collection.find(key);
+  GOOGLE_CHECK(it != collection.end()) << "Map key not found";
+  return it->second;
+}
+
+// Returns a const reference to the value associated with the given key if it
+// exists, otherwise returns a const reference to the provided default value.
+//
+// WARNING: If a temporary object is passed as the default "value,"
+// this function will return a reference to that temporary object,
+// which will be destroyed at the end of the statement. A common
+// example: if you have a map with string values, and you pass a char*
+// as the default "value," either use the returned value immediately
+// or store it in a string (not string&).
+// Details: http://go/findwithdefault
+template <class Collection>
+const typename Collection::value_type::second_type&
+FindWithDefault(const Collection& collection,
+                const typename Collection::value_type::first_type& key,
+                const typename Collection::value_type::second_type& value) {
+  typename Collection::const_iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return value;
+  }
+  return it->second;
+}
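+
+// Sketch of the temporary-lifetime pitfall described above (illustrative
+// only; names are hypothetical):
+//
+//   map<string, string> config;
+//   // Unsafe: if "timeout" is missing, the returned reference refers to the
+//   // temporary string built from the char* default and dangles after this
+//   // statement.
+//   const string& bad = FindWithDefault(config, "timeout", "30");
+//   // Safe: copy the result before the temporary is destroyed.
+//   string ok = FindWithDefault(config, "timeout", "30");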
+
+// Returns a pointer to the const value associated with the given key if it
+// exists, or NULL otherwise.
+template <class Collection>
+const typename Collection::value_type::second_type*
+FindOrNull(const Collection& collection,
+           const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return 0;
+  }
+  return &it->second;
+}
+
+// Same as above but returns a pointer to the non-const value.
+template <class Collection>
+typename Collection::value_type::second_type*
+FindOrNull(Collection& collection,  // NOLINT
+           const typename Collection::value_type::first_type& key) {
+  typename Collection::iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return 0;
+  }
+  return &it->second;
+}
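+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> ages;
+//   const int* age = FindOrNull(ages, "alice");
+//   if (age != NULL) {
+//     // Use *age; the key "alice" is present.
+//   }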
+
+// Returns the pointer value associated with the given key. If none is found,
+// NULL is returned. The function is designed to be used with a map of keys to
+// pointers.
+//
+// This function does not distinguish between a missing key and a key mapped
+// to a NULL value.
+template <class Collection>
+typename Collection::value_type::second_type
+FindPtrOrNull(const Collection& collection,
+              const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return typename Collection::value_type::second_type();
+  }
+  return it->second;
+}
+
+// Same as above, except takes non-const reference to collection.
+//
+// This function is needed for containers that propagate constness to the
+// pointee, such as boost::ptr_map.
+template <class Collection>
+typename Collection::value_type::second_type
+FindPtrOrNull(Collection& collection,  // NOLINT
+              const typename Collection::value_type::first_type& key) {
+  typename Collection::iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return typename Collection::value_type::second_type();
+  }
+  return it->second;
+}
+
+// Finds the pointer value associated with the given key in a map whose values
+// are linked_ptrs. Returns NULL if key is not found.
+template <class Collection>
+typename Collection::value_type::second_type::element_type*
+FindLinkedPtrOrNull(const Collection& collection,
+                    const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return 0;
+  }
+  // Since linked_ptr::get() is a const member returning a non const,
+  // we do not need a version of this function taking a non const collection.
+  return it->second.get();
+}
+
+// Same as above, but dies if the key is not found.
+template <class Collection>
+typename Collection::value_type::second_type::element_type&
+FindLinkedPtrOrDie(const Collection& collection,
+                   const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  GOOGLE_CHECK(it != collection.end()) << "key not found: " << key;
+  // Since linked_ptr::operator*() is a const member returning a non const,
+  // we do not need a version of this function taking a non const collection.
+  return *it->second;
+}
+
+// Finds the value associated with the given key and copies it to *value (if not
+// NULL). Returns false if the key was not found, true otherwise.
+template <class Collection, class Key, class Value>
+bool FindCopy(const Collection& collection,
+              const Key& key,
+              Value* const value) {
+  typename Collection::const_iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return false;
+  }
+  if (value) {
+    *value = it->second;
+  }
+  return true;
+}
+
+//
+// Contains*()
+//
+
+// Returns true if and only if the given collection contains the given key.
+template <class Collection, class Key>
+bool ContainsKey(const Collection& collection, const Key& key) {
+  return collection.find(key) != collection.end();
+}
+
+// Returns true if and only if the given collection contains the given key-value
+// pair.
+template <class Collection, class Key, class Value>
+bool ContainsKeyValuePair(const Collection& collection,
+                          const Key& key,
+                          const Value& value) {
+  typedef typename Collection::const_iterator const_iterator;
+  std::pair<const_iterator, const_iterator> range = collection.equal_range(key);
+  for (const_iterator it = range.first; it != range.second; ++it) {
+    if (it->second == value) {
+      return true;
+    }
+  }
+  return false;
+}
+
+//
+// Insert*()
+//
+
+// Inserts the given key-value pair into the collection. Returns true if and
+// only if the key from the given pair didn't previously exist. Otherwise, the
+// value in the map is replaced with the value from the given pair.
+template <class Collection>
+bool InsertOrUpdate(Collection* const collection,
+                    const typename Collection::value_type& vt) {
+  std::pair<typename Collection::iterator, bool> ret = collection->insert(vt);
+  if (!ret.second) {
+    // update
+    ret.first->second = vt.second;
+    return false;
+  }
+  return true;
+}
+
+// Same as above, except that the key and value are passed separately.
+template <class Collection>
+bool InsertOrUpdate(Collection* const collection,
+                    const typename Collection::value_type::first_type& key,
+                    const typename Collection::value_type::second_type& value) {
+  return InsertOrUpdate(
+      collection, typename Collection::value_type(key, value));
+}
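+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> scores;
+//   InsertOrUpdate(&scores, "alice", 1);   // returns true  (inserted)
+//   InsertOrUpdate(&scores, "alice", 2);   // returns false (value updated to 2)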
+
+// Inserts/updates all the key-value pairs from the range defined by the
+// iterators "first" and "last" into the given collection.
+template <class Collection, class InputIterator>
+void InsertOrUpdateMany(Collection* const collection,
+                        InputIterator first, InputIterator last) {
+  for (; first != last; ++first) {
+    InsertOrUpdate(collection, *first);
+  }
+}
+
+// Change the value associated with a particular key in a map or hash_map
+// of the form map<Key, Value*> which owns the objects pointed to by the
+// value pointers.  If there was an existing value for the key, it is deleted.
+// True indicates an insert took place, false indicates an update + delete.
+template <class Collection>
+bool InsertAndDeleteExisting(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key,
+    const typename Collection::value_type::second_type& value) {
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, value));
+  if (!ret.second) {
+    delete ret.first->second;
+    ret.first->second = value;
+    return false;
+  }
+  return true;
+}
+
+// Inserts the given key and value into the given collection if and only if the
+// given key did NOT already exist in the collection. If the key previously
+// existed in the collection, the value is not changed. Returns true if the
+// key-value pair was inserted; returns false if the key was already present.
+template <class Collection>
+bool InsertIfNotPresent(Collection* const collection,
+                        const typename Collection::value_type& vt) {
+  return collection->insert(vt).second;
+}
+
+// Same as above except the key and value are passed separately.
+template <class Collection>
+bool InsertIfNotPresent(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key,
+    const typename Collection::value_type::second_type& value) {
+  return InsertIfNotPresent(
+      collection, typename Collection::value_type(key, value));
+}
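+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> scores;
+//   InsertIfNotPresent(&scores, "alice", 1);   // true;  scores["alice"] == 1
+//   InsertIfNotPresent(&scores, "alice", 2);   // false; value stays 1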
+
+// Same as above except dies if the key already exists in the collection.
+template <class Collection>
+void InsertOrDie(Collection* const collection,
+                 const typename Collection::value_type& value) {
+  CHECK(InsertIfNotPresent(collection, value)) << "duplicate value: " << value;
+}
+
+// Same as above except doesn't log the value on error.
+template <class Collection>
+void InsertOrDieNoPrint(Collection* const collection,
+                        const typename Collection::value_type& value) {
+  CHECK(InsertIfNotPresent(collection, value)) << "duplicate value.";
+}
+
+// Inserts the key-value pair into the collection. Dies if key was already
+// present.
+template <class Collection>
+void InsertOrDie(Collection* const collection,
+                 const typename Collection::value_type::first_type& key,
+                 const typename Collection::value_type::second_type& data) {
+  GOOGLE_CHECK(InsertIfNotPresent(collection, key, data))
+      << "duplicate key: " << key;
+}
+
+// Same as above except doesn't log the key on error.
+template <class Collection>
+void InsertOrDieNoPrint(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key,
+    const typename Collection::value_type::second_type& data) {
+  GOOGLE_CHECK(InsertIfNotPresent(collection, key, data)) << "duplicate key.";
+}
+
+// Inserts a new key and default-initialized value. Dies if the key was already
+// present. Returns a reference to the value. Example usage:
+//
+// map<int, SomeProto> m;
+// SomeProto& proto = InsertKeyOrDie(&m, 3);
+// proto.set_field("foo");
+template <class Collection>
+typename Collection::value_type::second_type& InsertKeyOrDie(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key) {
+  typedef typename Collection::value_type value_type;
+  std::pair<typename Collection::iterator, bool> res =
+      collection->insert(value_type(key, typename value_type::second_type()));
+  GOOGLE_CHECK(res.second) << "duplicate key: " << key;
+  return res.first->second;
+}
+
+//
+// Lookup*()
+//
+
+// Looks up a given key and value pair in a collection and inserts the key-value
+// pair if it's not already present. Returns a reference to the value associated
+// with the key.
+template <class Collection>
+typename Collection::value_type::second_type&
+LookupOrInsert(Collection* const collection,
+               const typename Collection::value_type& vt) {
+  return collection->insert(vt).first->second;
+}
+
+// Same as above except the key-value are passed separately.
+template <class Collection>
+typename Collection::value_type::second_type&
+LookupOrInsert(Collection* const collection,
+               const typename Collection::value_type::first_type& key,
+               const typename Collection::value_type::second_type& value) {
+  return LookupOrInsert(
+      collection, typename Collection::value_type(key, value));
+}
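+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> counts;
+//   ++LookupOrInsert(&counts, "x", 0);   // counts["x"] == 1
+//   ++LookupOrInsert(&counts, "x", 0);   // counts["x"] == 2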
+
+// Counts the number of equivalent elements in the given "sequence", and stores
+// the results in "count_map" with element as the key and count as the value.
+//
+// Example:
+//   vector<string> v = {"a", "b", "c", "a", "b"};
+//   map<string, int> m;
+//   AddTokenCounts(v, 1, &m);
+//   assert(m["a"] == 2);
+//   assert(m["b"] == 2);
+//   assert(m["c"] == 1);
+template <typename Sequence, typename Collection>
+void AddTokenCounts(
+    const Sequence& sequence,
+    const typename Collection::value_type::second_type& increment,
+    Collection* const count_map) {
+  for (typename Sequence::const_iterator it = sequence.begin();
+       it != sequence.end(); ++it) {
+    typename Collection::value_type::second_type& value =
+        LookupOrInsert(count_map, *it,
+                       typename Collection::value_type::second_type());
+    value += increment;
+  }
+}
+
+// Returns a reference to the value associated with key. If not found, a value
+// is default constructed on the heap and added to the map.
+//
+// This function is useful for containers of the form map<Key, Value*>, where
+// inserting a new key, value pair involves constructing a new heap-allocated
+// Value, and storing a pointer to that in the collection.
+template <class Collection>
+typename Collection::value_type::second_type&
+LookupOrInsertNew(Collection* const collection,
+                  const typename Collection::value_type::first_type& key) {
+  typedef typename std::iterator_traits<
+    typename Collection::value_type::second_type>::value_type Element;
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(
+          key,
+          static_cast<typename Collection::value_type::second_type>(NULL)));
+  if (ret.second) {
+    ret.first->second = new Element();
+  }
+  return ret.first->second;
+}
+
+// Same as above but constructs the value using the single-argument constructor
+// and the given "arg".
+template <class Collection, class Arg>
+typename Collection::value_type::second_type&
+LookupOrInsertNew(Collection* const collection,
+                  const typename Collection::value_type::first_type& key,
+                  const Arg& arg) {
+  typedef typename std::iterator_traits<
+    typename Collection::value_type::second_type>::value_type Element;
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(
+          key,
+          static_cast<typename Collection::value_type::second_type>(NULL)));
+  if (ret.second) {
+    ret.first->second = new Element(arg);
+  }
+  return ret.first->second;
+}
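+
+// Usage sketch (illustrative only; "Widget" is a hypothetical type with a
+// default and a single-argument constructor):
+//
+//   map<string, Widget*> widgets;
+//   Widget* a = LookupOrInsertNew(&widgets, "a");       // new Widget()
+//   Widget* b = LookupOrInsertNew(&widgets, "b", 42);   // new Widget(42)
+//   // The map owns the Widgets; delete them before discarding the map.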
+
+// Lookup of linked/shared pointers is used in two scenarios:
+//
+// Use LookupOrInsertNewLinkedPtr if the container owns the elements.
+// In this case it is fine working with the raw pointer as long as it is
+// guaranteed that no other thread can delete/update an accessed element.
+// A mutex will need to lock the container operation as well as the use
+// of the returned elements. Finding an element may be performed using
+// FindLinkedPtr*().
+//
+// Use LookupOrInsertNewSharedPtr if the container does not own the elements
+// for their whole lifetime. This is typically the case when a reader allows
+// parallel updates to the container. In this case a Mutex only needs to lock
+// container operations, but all element operations must be performed on the
+// shared pointer. Finding an element must be performed using FindPtr*() and
+// cannot be done with FindLinkedPtr*() even though it compiles.
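+//
+// Sketch of the owning-container case (illustrative only; names are
+// hypothetical):
+//
+//   map<string, linked_ptr<Session> > sessions;
+//   // While holding the mutex that guards "sessions":
+//   Session* s = LookupOrInsertNewLinkedPtr(&sessions, session_id);
+//   s->Touch();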
+
+// Lookup a key in a map or hash_map whose values are linked_ptrs.  If it is
+// missing, set collection[key].reset(new Value::element_type) and return that.
+// Value::element_type must be default constructible.
+template <class Collection>
+typename Collection::value_type::second_type::element_type*
+LookupOrInsertNewLinkedPtr(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key) {
+  typedef typename Collection::value_type::second_type Value;
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, Value()));
+  if (ret.second) {
+    ret.first->second.reset(new typename Value::element_type);
+  }
+  return ret.first->second.get();
+}
+
+// A variant of LookupOrInsertNewLinkedPtr where the value is constructed using
+// a single-parameter constructor.  Note: the constructor argument is computed
+// even if it will not be used, so only values cheap to compute should be passed
+// here.  On the other hand it does not matter how expensive the construction of
+// the actual stored value is, as that only occurs if necessary.
+template <class Collection, class Arg>
+typename Collection::value_type::second_type::element_type*
+LookupOrInsertNewLinkedPtr(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key,
+    const Arg& arg) {
+  typedef typename Collection::value_type::second_type Value;
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, Value()));
+  if (ret.second) {
+    ret.first->second.reset(new typename Value::element_type(arg));
+  }
+  return ret.first->second.get();
+}
+
+// Lookup a key in a map or hash_map whose values are shared_ptrs.  If it is
+// missing, set collection[key].reset(new Value::element_type). Unlike
+// LookupOrInsertNewLinkedPtr, this function returns the shared_ptr instead of
+// the raw pointer. Value::element_type must be default constructible.
+template <class Collection>
+typename Collection::value_type::second_type&
+LookupOrInsertNewSharedPtr(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key) {
+  typedef typename Collection::value_type::second_type SharedPtr;
+  typedef typename Collection::value_type::second_type::element_type Element;
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, SharedPtr()));
+  if (ret.second) {
+    ret.first->second.reset(new Element());
+  }
+  return ret.first->second;
+}
+
+// A variant of LookupOrInsertNewSharedPtr where the value is constructed using
+// a single-parameter constructor.  Note: the constructor argument is computed
+// even if it will not be used, so only values cheap to compute should be passed
+// here.  On the other hand it does not matter how expensive the construction of
+// the actual stored value is, as that only occurs if necessary.
+template <class Collection, class Arg>
+typename Collection::value_type::second_type&
+LookupOrInsertNewSharedPtr(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key,
+    const Arg& arg) {
+  typedef typename Collection::value_type::second_type SharedPtr;
+  typedef typename Collection::value_type::second_type::element_type Element;
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, SharedPtr()));
+  if (ret.second) {
+    ret.first->second.reset(new Element(arg));
+  }
+  return ret.first->second;
+}
+
+//
+// Misc Utility Functions
+//
+
+// Updates the value associated with the given key. If the key was not already
+// present, the key-value pair is inserted and "previous" is unchanged. If the
+// key was already present, the value is updated and "*previous" will contain a
+// copy of the old value. Returns true if and only if the key was already
+// present.
+//
+// InsertOrReturnExisting has complementary behavior that returns the
+// address of an already existing value, rather than updating it.
+template <class Collection>
+bool UpdateReturnCopy(Collection* const collection,
+                      const typename Collection::value_type::first_type& key,
+                      const typename Collection::value_type::second_type& value,
+                      typename Collection::value_type::second_type* previous) {
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, value));
+  if (!ret.second) {
+    // update
+    if (previous) {
+      *previous = ret.first->second;
+    }
+    ret.first->second = value;
+    return true;
+  }
+  return false;
+}
+
+// Same as above except that the key and value are passed as a pair.
+template <class Collection>
+bool UpdateReturnCopy(Collection* const collection,
+                      const typename Collection::value_type& vt,
+                      typename Collection::value_type::second_type* previous) {
+  std::pair<typename Collection::iterator, bool> ret = collection->insert(vt);
+  if (!ret.second) {
+    // update
+    if (previous) {
+      *previous = ret.first->second;
+    }
+    ret.first->second = vt.second;
+    return true;
+  }
+  return false;
+}
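+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> scores;
+//   int previous = 0;
+//   UpdateReturnCopy(&scores, "alice", 1, &previous);   // false; inserted
+//   UpdateReturnCopy(&scores, "alice", 2, &previous);   // true; previous == 1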
+
+// Tries to insert the given key-value pair into the collection. Returns NULL if
+// the insert succeeds. Otherwise, returns a pointer to the existing value.
+//
+// This complements UpdateReturnCopy in that it allows updating only after
+// verifying the old value, while still inserting quickly without having to
+// look up twice. Unlike UpdateReturnCopy, it also does not leave "*previous"
+// undefined when new data was inserted.
+template <class Collection>
+typename Collection::value_type::second_type* const
+InsertOrReturnExisting(Collection* const collection,
+                       const typename Collection::value_type& vt) {
+  std::pair<typename Collection::iterator, bool> ret = collection->insert(vt);
+  if (ret.second) {
+    return NULL;  // Inserted, no existing previous value.
+  } else {
+    return &ret.first->second;  // Return address of already existing value.
+  }
+}
+
+// Same as above, except for explicit key and data.
+template <class Collection>
+typename Collection::value_type::second_type* const
+InsertOrReturnExisting(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key,
+    const typename Collection::value_type::second_type& data) {
+  return InsertOrReturnExisting(collection,
+                                typename Collection::value_type(key, data));
+}
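+
+// Usage sketch (illustrative only; names are hypothetical):
+//
+//   map<string, int> scores;
+//   int* existing = InsertOrReturnExisting(&scores, "alice", 1);
+//   if (existing == NULL) {
+//     // Inserted; scores["alice"] == 1.
+//   } else {
+//     // Already present; *existing is the stored value, which was not changed.
+//   }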
+
+// Erases the collection item identified by the given key, and returns the value
+// associated with that key. It is assumed that the value (i.e., the
+// mapped_type) is a pointer. Returns NULL if the key was not found in the
+// collection.
+//
+// Examples:
+//   map<string, MyType*> my_map;
+//
+// One line cleanup:
+//     delete EraseKeyReturnValuePtr(&my_map, "abc");
+//
+// Use returned value:
+//     scoped_ptr<MyType> value_ptr(EraseKeyReturnValuePtr(&my_map, "abc"));
+//     if (value_ptr.get())
+//       value_ptr->DoSomething();
+//
+template <class Collection>
+typename Collection::value_type::second_type EraseKeyReturnValuePtr(
+    Collection* const collection,
+    const typename Collection::value_type::first_type& key) {
+  typename Collection::iterator it = collection->find(key);
+  if (it == collection->end()) {
+    return NULL;
+  }
+  typename Collection::value_type::second_type v = it->second;
+  collection->erase(it);
+  return v;
+}
+
+// Inserts all the keys from map_container into key_container, which must
+// support insert(MapContainer::key_type).
+//
+// Note: any initial contents of the key_container are not cleared.
+template <class MapContainer, class KeyContainer>
+void InsertKeysFromMap(const MapContainer& map_container,
+                       KeyContainer* key_container) {
+  GOOGLE_CHECK(key_container != NULL);
+  for (typename MapContainer::const_iterator it = map_container.begin();
+       it != map_container.end(); ++it) {
+    key_container->insert(it->first);
+  }
+}
+
+// Appends all the keys from map_container into key_container, which must
+// support push_back(MapContainer::key_type).
+//
+// Note: any initial contents of the key_container are not cleared.
+template <class MapContainer, class KeyContainer>
+void AppendKeysFromMap(const MapContainer& map_container,
+                       KeyContainer* key_container) {
+  GOOGLE_CHECK(key_container != NULL);
+  for (typename MapContainer::const_iterator it = map_container.begin();
+       it != map_container.end(); ++it) {
+    key_container->push_back(it->first);
+  }
+}
+
+// A more specialized overload of AppendKeysFromMap to optimize reallocations
+// for the common case in which we're appending keys to a vector and hence can
+// (and sometimes should) call reserve() first.
+//
+// (It would be possible to play SFINAE games to call reserve() for any
+// container that supports it, but this seems to get us 99% of what we need
+// without the complexity of a SFINAE-based solution.)
+template <class MapContainer, class KeyType>
+void AppendKeysFromMap(const MapContainer& map_container,
+                       vector<KeyType>* key_container) {
+  GOOGLE_CHECK(key_container != NULL);
+  // We now have the opportunity to call reserve(). Calling reserve() every
+  // time is a bad idea for some use cases: libstdc++'s implementation of
+  // vector<>::reserve() resizes the vector's backing store to exactly the
+  // given size (unless it's already at least that big). Because of this,
+  // the use case that involves appending a lot of small maps (total size
+  // N) one by one to a vector would be O(N^2). But never calling reserve()
+  // loses the opportunity to improve the use case of adding from a large
+  // map to an empty vector (this improves performance by up to 33%). A
+  // number of heuristics are possible; see the discussion in
+  // cl/34081696. Here we use the simplest one.
+  if (key_container->empty()) {
+    key_container->reserve(map_container.size());
+  }
+  for (typename MapContainer::const_iterator it = map_container.begin();
+       it != map_container.end(); ++it) {
+    key_container->push_back(it->first);
+  }
+}
+
+// Inserts all the values from map_container into value_container, which must
+// support push_back(MapContainer::mapped_type).
+//
+// Note: any initial contents of the value_container are not cleared.
+template <class MapContainer, class ValueContainer>
+void AppendValuesFromMap(const MapContainer& map_container,
+                         ValueContainer* value_container) {
+  GOOGLE_CHECK(value_container != NULL);
+  for (typename MapContainer::const_iterator it = map_container.begin();
+       it != map_container.end(); ++it) {
+    value_container->push_back(it->second);
+  }
+}
+
+// A more specialized overload of AppendValuesFromMap to optimize reallocations
+// for the common case in which we're appending values to a vector and hence
+// can (and sometimes should) call reserve() first.
+//
+// (It would be possible to play SFINAE games to call reserve() for any
+// container that supports it, but this seems to get us 99% of what we need
+// without the complexity of a SFINAE-based solution.)
+template <class MapContainer, class ValueType>
+void AppendValuesFromMap(const MapContainer& map_container,
+                         vector<ValueType>* value_container) {
+  GOOGLE_CHECK(value_container != NULL);
+  // See AppendKeysFromMap for why this is done.
+  if (value_container->empty()) {
+    value_container->reserve(map_container.size());
+  }
+  for (typename MapContainer::const_iterator it = map_container.begin();
+       it != map_container.end(); ++it) {
+    value_container->push_back(it->second);
+  }
+}
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__
diff --git a/src/google/protobuf/stubs/mathlimits.cc b/src/google/protobuf/stubs/mathlimits.cc
new file mode 100644
index 0000000..0373b2b
--- /dev/null
+++ b/src/google/protobuf/stubs/mathlimits.cc
@@ -0,0 +1,144 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// All Rights Reserved.
+//
+// Author: Maxim Lifantsev
+//
+
+#include <google/protobuf/stubs/mathlimits.h>
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+
+// MSVC++ 2005 and older compilers think the header declaration was a
+// definition, and erroneously flag these as a duplicate definition.
+#if defined(COMPILER_MSVC) || __cplusplus < 201103L
+
+#define DEF_COMMON_LIMITS(Type)
+#define DEF_UNSIGNED_INT_LIMITS(Type)
+#define DEF_SIGNED_INT_LIMITS(Type)
+#define DEF_PRECISION_LIMITS(Type)
+
+#else
+
+#define DEF_COMMON_LIMITS(Type) \
+const bool MathLimits<Type>::kIsSigned; \
+const bool MathLimits<Type>::kIsInteger; \
+const int MathLimits<Type>::kMin10Exp; \
+const int MathLimits<Type>::kMax10Exp;
+
+#define DEF_UNSIGNED_INT_LIMITS(Type) \
+DEF_COMMON_LIMITS(Type) \
+const Type MathLimits<Type>::kPosMin; \
+const Type MathLimits<Type>::kPosMax; \
+const Type MathLimits<Type>::kMin; \
+const Type MathLimits<Type>::kMax; \
+const Type MathLimits<Type>::kEpsilon; \
+const Type MathLimits<Type>::kStdError;
+
+#define DEF_SIGNED_INT_LIMITS(Type) \
+DEF_UNSIGNED_INT_LIMITS(Type) \
+const Type MathLimits<Type>::kNegMin; \
+const Type MathLimits<Type>::kNegMax;
+
+#define DEF_PRECISION_LIMITS(Type) \
+const int MathLimits<Type>::kPrecisionDigits;
+
+#endif  // defined(COMPILER_MSVC) || __cplusplus < 201103L
+
+// http://en.wikipedia.org/wiki/Quadruple_precision_floating-point_format#Double-double_arithmetic
+// With some compilers (gcc 4.6.x) on some platforms (powerpc64),
+// "long double" is implemented as a pair of double: "double double" format.
+// This causes a problem with epsilon (eps).
+// eps is the smallest positive number such that 1.0 + eps > 1.0
+//
+// Normal format:  1.0 + e = 1.0...01      // N-1 zeros for N fraction bits
+// D-D format:     1.0 + e = 1.000...0001  // epsilon can be very small
+//
+// In the normal format, 1.0 + e has to fit in one stretch of bits.
+// The maximum rounding error is half of eps.
+//
+// In the double-double format, 1.0 + e splits across two doubles:
+// 1.0 in the high double, e in the low double, and they do not have to
+// be contiguous.  The maximum rounding error on a value close to 1.0 is
+// much larger than eps.
+//
+// Some code checks for errors by comparing a computed value to a golden
+// value +/- some multiple of the maximum rounding error.  The maximum
+// rounding error is not available so we use eps as an approximation
+// instead.  That fails when long double is in the double-double format.
+// Therefore, we define kStdError as a multiple of
+// max(DBL_EPSILON * DBL_EPSILON, kEpsilon) rather than a multiple of kEpsilon.
+
+#define DEF_FP_LIMITS(Type, PREFIX) \
+DEF_COMMON_LIMITS(Type) \
+const Type MathLimits<Type>::kPosMin = PREFIX##_MIN; \
+const Type MathLimits<Type>::kPosMax = PREFIX##_MAX; \
+const Type MathLimits<Type>::kMin = -MathLimits<Type>::kPosMax; \
+const Type MathLimits<Type>::kMax = MathLimits<Type>::kPosMax; \
+const Type MathLimits<Type>::kNegMin = -MathLimits<Type>::kPosMin; \
+const Type MathLimits<Type>::kNegMax = -MathLimits<Type>::kPosMax; \
+const Type MathLimits<Type>::kEpsilon = PREFIX##_EPSILON; \
+/* 32 is 5 bits of mantissa error; should be adequate for common errors */ \
+const Type MathLimits<Type>::kStdError = \
+  32 * (DBL_EPSILON * DBL_EPSILON > MathLimits<Type>::kEpsilon \
+      ? DBL_EPSILON * DBL_EPSILON : MathLimits<Type>::kEpsilon); \
+DEF_PRECISION_LIMITS(Type) \
+const Type MathLimits<Type>::kNaN = HUGE_VAL - HUGE_VAL; \
+const Type MathLimits<Type>::kPosInf = HUGE_VAL; \
+const Type MathLimits<Type>::kNegInf = -HUGE_VAL;
+
+// The following are *not* casts!
+DEF_SIGNED_INT_LIMITS(int8)
+DEF_SIGNED_INT_LIMITS(int16)  // NOLINT(readability/casting)
+DEF_SIGNED_INT_LIMITS(int32)  // NOLINT(readability/casting)
+DEF_SIGNED_INT_LIMITS(int64)  // NOLINT(readability/casting)
+DEF_UNSIGNED_INT_LIMITS(uint8)
+DEF_UNSIGNED_INT_LIMITS(uint16)  // NOLINT(readability/casting)
+DEF_UNSIGNED_INT_LIMITS(uint32)  // NOLINT(readability/casting)
+DEF_UNSIGNED_INT_LIMITS(uint64)  // NOLINT(readability/casting)
+
+DEF_SIGNED_INT_LIMITS(long int)
+DEF_UNSIGNED_INT_LIMITS(unsigned long int)
+
+DEF_FP_LIMITS(float, FLT)
+DEF_FP_LIMITS(double, DBL)
+DEF_FP_LIMITS(long double, LDBL);
+
+#undef DEF_COMMON_LIMITS
+#undef DEF_SIGNED_INT_LIMITS
+#undef DEF_UNSIGNED_INT_LIMITS
+#undef DEF_FP_LIMITS
+#undef DEF_PRECISION_LIMITS
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/mathlimits.h b/src/google/protobuf/stubs/mathlimits.h
new file mode 100644
index 0000000..d984694
--- /dev/null
+++ b/src/google/protobuf/stubs/mathlimits.h
@@ -0,0 +1,279 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// All Rights Reserved.
+//
+// Author: Maxim Lifantsev
+//
+// Useful integer and floating point limits and type traits.
+//
+// This partially replaces/duplicates numeric_limits<> from <limits>.
+// We get a Google-style class that we have greater control over
+// and thus can add new features to it or fix whatever happens to be broken in
+// numeric_limits for the compilers we use.
+//
+
+#ifndef UTIL_MATH_MATHLIMITS_H__
+#define UTIL_MATH_MATHLIMITS_H__
+
+// <math.h> lacks a lot of prototypes. However, this file needs <math.h> to
+// access old-fashioned isinf et al. Even worse: this file must not
+// include <cmath> because that breaks the definition of isinf with gcc 4.9.
+//
+// TODO(mec): after C++11 everywhere, use <cmath> and std::isinf in this file.
+#include <math.h>
+#include <string.h>
+
+#include <cfloat>
+
+#include <google/protobuf/stubs/common.h>
+
+// ========================================================================= //
+
+// Useful integer and floating point limits and type traits.
+// This is just for the documentation;
+// real members are defined in our specializations below.
+namespace google {
+namespace protobuf {
+template<typename T> struct MathLimits {
+  // Type name.
+  typedef T Type;
+  // Unsigned version of the Type with the same byte size.
+  // Same as Type for floating point and unsigned types.
+  typedef T UnsignedType;
+  // If the type supports negative values.
+  static const bool kIsSigned;
+  // If the type supports only integer values.
+  static const bool kIsInteger;
+  // Magnitude-wise smallest representable positive value.
+  static const Type kPosMin;
+  // Magnitude-wise largest representable positive value.
+  static const Type kPosMax;
+  // Smallest representable value.
+  static const Type kMin;
+  // Largest representable value.
+  static const Type kMax;
+  // Magnitude-wise smallest representable negative value.
+  // Present only if kIsSigned.
+  static const Type kNegMin;
+  // Magnitude-wise largest representable negative value.
+  // Present only if kIsSigned.
+  static const Type kNegMax;
+  // Smallest integer x such that 10^x is representable.
+  static const int kMin10Exp;
+  // Largest integer x such that 10^x is representable.
+  static const int kMax10Exp;
+  // Smallest positive value such that Type(1) + kEpsilon != Type(1)
+  static const Type kEpsilon;
+  // Typical rounding error that is enough to cover
+  // a few simple floating-point operations.
+  // Slightly larger than kEpsilon to account for a few rounding errors.
+  // Is zero if kIsInteger.
+  static const Type kStdError;
+  // Number of decimal digits of mantissa precision.
+  // Present only if !kIsInteger.
+  static const int kPrecisionDigits;
+  // Not a number, i.e. result of 0/0.
+  // Present only if !kIsInteger.
+  static const Type kNaN;
+  // Positive infinity, i.e. result of 1/0.
+  // Present only if !kIsInteger.
+  static const Type kPosInf;
+  // Negative infinity, i.e. result of -1/0.
+  // Present only if !kIsInteger.
+  static const Type kNegInf;
+
+  // NOTE: Special floating point values behave
+  // in a special (but mathematically-logical) way
+  // in terms of (in)equality comparison and mathematical operations
+  // -- see our unittest for examples.
+
+  // Special floating point value testers.
+  // Present in integer types for convenience.
+  static bool IsFinite(const Type x);
+  static bool IsNaN(const Type x);
+  static bool IsInf(const Type x);
+  static bool IsPosInf(const Type x);
+  static bool IsNegInf(const Type x);
+};
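+
+// Usage sketch (illustrative only; ComputeSomething is hypothetical):
+//
+//   double x = ComputeSomething();
+//   if (!MathLimits<double>::IsFinite(x)) {
+//     x = MathLimits<double>::kPosMax;   // replace NaN/inf with the largest
+//                                        // finite double
+//   }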
+
+// ========================================================================= //
+
+// All #define-s below are simply to refactor the declarations of
+// MathLimits template specializations.
+// They are all #undef-ined below.
+
+// The hoop-jumping in *_INT_(MAX|MIN) below is so that the compiler does not
+// get an overflow while computing the constants.
+
+#define SIGNED_INT_MAX(Type) \
+  (((Type(1) << (sizeof(Type)*8 - 2)) - 1) + (Type(1) << (sizeof(Type)*8 - 2)))
+
+#define SIGNED_INT_MIN(Type) \
+  (-(Type(1) << (sizeof(Type)*8 - 2)) - (Type(1) << (sizeof(Type)*8 - 2)))
+
+#define UNSIGNED_INT_MAX(Type) \
+  (((Type(1) << (sizeof(Type)*8 - 1)) - 1) + (Type(1) << (sizeof(Type)*8 - 1)))
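+
+// For example, with a 1-byte type the two halves add up without overflowing
+// any intermediate value (illustrative arithmetic, using the int8/uint8
+// typedefs from the stubs):
+//
+//   SIGNED_INT_MAX(int8)    == ((1 << 6) - 1) + (1 << 6) ==  63 +  64 ==  127
+//   SIGNED_INT_MIN(int8)    == -(1 << 6) - (1 << 6)      == -64 -  64 == -128
+//   UNSIGNED_INT_MAX(uint8) == ((1 << 7) - 1) + (1 << 7) == 127 + 128 ==  255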
+
+// Compile-time selected log10-related constants for integer types.
+#define SIGNED_MAX_10_EXP(Type) \
+  (sizeof(Type) == 1 ? 2 : ( \
+    sizeof(Type) == 2 ? 4 : ( \
+      sizeof(Type) == 4 ? 9 : ( \
+        sizeof(Type) == 8 ? 18 : -1))))
+
+#define UNSIGNED_MAX_10_EXP(Type) \
+  (sizeof(Type) == 1 ? 2 : ( \
+    sizeof(Type) == 2 ? 4 : ( \
+      sizeof(Type) == 4 ? 9 : ( \
+        sizeof(Type) == 8 ? 19 : -1))))
+
+#define DECL_INT_LIMIT_FUNCS \
+  static bool IsFinite(const Type /*x*/) { return true; } \
+  static bool IsNaN(const Type /*x*/) { return false; } \
+  static bool IsInf(const Type /*x*/) { return false; } \
+  static bool IsPosInf(const Type /*x*/) { return false; } \
+  static bool IsNegInf(const Type /*x*/) { return false; }
+
+#define DECL_SIGNED_INT_LIMITS(IntType, UnsignedIntType) \
+template<> \
+struct LIBPROTOBUF_EXPORT MathLimits<IntType> { \
+  typedef IntType Type; \
+  typedef UnsignedIntType UnsignedType; \
+  static const bool kIsSigned = true; \
+  static const bool kIsInteger = true; \
+  static const Type kPosMin = 1; \
+  static const Type kPosMax = SIGNED_INT_MAX(Type); \
+  static const Type kMin = SIGNED_INT_MIN(Type); \
+  static const Type kMax = kPosMax; \
+  static const Type kNegMin = -1; \
+  static const Type kNegMax = kMin; \
+  static const int kMin10Exp = 0; \
+  static const int kMax10Exp = SIGNED_MAX_10_EXP(Type); \
+  static const Type kEpsilon = 1; \
+  static const Type kStdError = 0; \
+  DECL_INT_LIMIT_FUNCS \
+};
+
+#define DECL_UNSIGNED_INT_LIMITS(IntType) \
+template<> \
+struct LIBPROTOBUF_EXPORT MathLimits<IntType> { \
+  typedef IntType Type; \
+  typedef IntType UnsignedType; \
+  static const bool kIsSigned = false; \
+  static const bool kIsInteger = true; \
+  static const Type kPosMin = 1; \
+  static const Type kPosMax = UNSIGNED_INT_MAX(Type); \
+  static const Type kMin = 0; \
+  static const Type kMax = kPosMax; \
+  static const int kMin10Exp = 0; \
+  static const int kMax10Exp = UNSIGNED_MAX_10_EXP(Type); \
+  static const Type kEpsilon = 1; \
+  static const Type kStdError = 0; \
+  DECL_INT_LIMIT_FUNCS \
+};
+
+DECL_SIGNED_INT_LIMITS(signed char, unsigned char)
+DECL_SIGNED_INT_LIMITS(signed short int, unsigned short int)
+DECL_SIGNED_INT_LIMITS(signed int, unsigned int)
+DECL_SIGNED_INT_LIMITS(signed long int, unsigned long int)
+DECL_SIGNED_INT_LIMITS(signed long long int, unsigned long long int)
+DECL_UNSIGNED_INT_LIMITS(unsigned char)
+DECL_UNSIGNED_INT_LIMITS(unsigned short int)
+DECL_UNSIGNED_INT_LIMITS(unsigned int)
+DECL_UNSIGNED_INT_LIMITS(unsigned long int)
+DECL_UNSIGNED_INT_LIMITS(unsigned long long int)
+
+#undef DECL_SIGNED_INT_LIMITS
+#undef DECL_UNSIGNED_INT_LIMITS
+#undef SIGNED_INT_MAX
+#undef SIGNED_INT_MIN
+#undef UNSIGNED_INT_MAX
+#undef SIGNED_MAX_10_EXP
+#undef UNSIGNED_MAX_10_EXP
+#undef DECL_INT_LIMIT_FUNCS
+
+// ========================================================================= //
+#ifdef WIN32  // Lacks built-in isnan() and isinf()
+#define DECL_FP_LIMIT_FUNCS \
+  static bool IsFinite(const Type x) { return _finite(x); } \
+  static bool IsNaN(const Type x) { return _isnan(x); } \
+  static bool IsInf(const Type x) { return (_fpclass(x) & (_FPCLASS_NINF | _FPCLASS_PINF)) != 0; } \
+  static bool IsPosInf(const Type x) { return _fpclass(x) == _FPCLASS_PINF; } \
+  static bool IsNegInf(const Type x) { return _fpclass(x) == _FPCLASS_NINF; }
+#else
+#define DECL_FP_LIMIT_FUNCS \
+  static bool IsFinite(const Type x) { return !isinf(x)  &&  !isnan(x); } \
+  static bool IsNaN(const Type x) { return isnan(x); } \
+  static bool IsInf(const Type x) { return isinf(x); } \
+  static bool IsPosInf(const Type x) { return isinf(x)  &&  x > 0; } \
+  static bool IsNegInf(const Type x) { return isinf(x)  &&  x < 0; }
+#endif
+
+// We can't put floating-point constant values in the header here because
+// such constants are not considered to be primitive-type constants by gcc.
+// CAVEAT: Hence, they are going to be initialized only during
+// global object construction time.
+#define DECL_FP_LIMITS(FP_Type, PREFIX) \
+template<> \
+struct LIBPROTOBUF_EXPORT MathLimits<FP_Type> { \
+  typedef FP_Type Type; \
+  typedef FP_Type UnsignedType; \
+  static const bool kIsSigned = true; \
+  static const bool kIsInteger = false; \
+  static const Type kPosMin; \
+  static const Type kPosMax; \
+  static const Type kMin; \
+  static const Type kMax; \
+  static const Type kNegMin; \
+  static const Type kNegMax; \
+  static const int kMin10Exp = PREFIX##_MIN_10_EXP; \
+  static const int kMax10Exp = PREFIX##_MAX_10_EXP; \
+  static const Type kEpsilon; \
+  static const Type kStdError; \
+  static const int kPrecisionDigits = PREFIX##_DIG; \
+  static const Type kNaN; \
+  static const Type kPosInf; \
+  static const Type kNegInf; \
+  DECL_FP_LIMIT_FUNCS \
+};
+
+DECL_FP_LIMITS(float, FLT)
+DECL_FP_LIMITS(double, DBL)
+DECL_FP_LIMITS(long double, LDBL)
+
+#undef DECL_FP_LIMITS
+#undef DECL_FP_LIMIT_FUNCS
+
+// ========================================================================= //
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // UTIL_MATH_MATHLIMITS_H__
diff --git a/src/google/protobuf/stubs/mathutil.h b/src/google/protobuf/stubs/mathutil.h
new file mode 100644
index 0000000..3a1ef8a
--- /dev/null
+++ b/src/google/protobuf/stubs/mathutil.h
@@ -0,0 +1,162 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef GOOGLE_PROTOBUF_STUBS_MATHUTIL_H_
+#define GOOGLE_PROTOBUF_STUBS_MATHUTIL_H_
+
+#include <float.h>
+#include <math.h>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/mathlimits.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template<typename T>
+bool IsNan(T value) {
+  return false;
+}
+template<>
+inline bool IsNan(float value) {
+#ifdef _MSC_VER
+  return _isnan(value);
+#else
+  return isnan(value);
+#endif
+}
+template<>
+inline bool IsNan(double value) {
+#ifdef _MSC_VER
+  return _isnan(value);
+#else
+  return isnan(value);
+#endif
+}
+
+template<typename T>
+bool AlmostEquals(T a, T b) {
+  return a == b;
+}
+template<>
+inline bool AlmostEquals(float a, float b) {
+  return fabs(a - b) < 32 * FLT_EPSILON;
+}
+
+template<>
+inline bool AlmostEquals(double a, double b) {
+  return fabs(a - b) < 32 * DBL_EPSILON;
+}
+}  // namespace internal
+
+class MathUtil {
+ public:
+  template<typename T>
+  static T Sign(T value) {
+    if (value == T(0) || ::google::protobuf::internal::IsNan<T>(value)) {
+      return value;
+    }
+    // Return +1 or -1 according to the sign of value.
+    return value > T(0) ? T(1) : T(-1);
+  }
+
+  template<typename T>
+  static bool AlmostEquals(T a, T b) {
+    return ::google::protobuf::internal::AlmostEquals(a, b);
+  }
+
+  // Largest of two values.
+  // Works correctly for special floating point values.
+  // Note: 0.0 and -0.0 are not differentiated by Max (Max(0.0, -0.0) is -0.0),
+  // which should be OK because, although they (can) have different
+  // bit representation, they are observably the same when examined
+  // with arithmetic and (in)equality operators.
+  template<typename T>
+  static T Max(const T x, const T y) {
+    return MathLimits<T>::IsNaN(x) || x > y ? x : y;
+  }
+
+  // Absolute value of x
+  // Works correctly for unsigned types and
+  // for special floating point values.
+  // Note: 0.0 and -0.0 are not differentiated by Abs (Abs(0.0) is -0.0),
+  // which should be OK: see the comment for Max above.
+  template<typename T>
+  static T Abs(const T x) {
+    return x > T(0) ? x : -x;
+  }
+
+  // Absolute value of the difference between two numbers.
+  // Works correctly for signed types and special floating point values.
+  template<typename T>
+  static typename MathLimits<T>::UnsignedType AbsDiff(const T x, const T y) {
+    // Carries out arithmetic as unsigned to avoid overflow.
+    typedef typename MathLimits<T>::UnsignedType R;
+    return x > y ? R(x) - R(y) : R(y) - R(x);
+  }
+
+  // If two (usually floating point) numbers are within a certain
+  // fraction of their magnitude or within a certain absolute margin of error.
+  // This is the same as the following but faster:
+  //   WithinFraction(x, y, fraction)  ||  WithinMargin(x, y, margin)
+  // E.g. WithinFraction(0.0, 1e-10, 1e-5) is false but
+  //      WithinFractionOrMargin(0.0, 1e-10, 1e-5, 1e-5) is true.
+  template<typename T>
+  static bool WithinFractionOrMargin(const T x, const T y,
+                                     const T fraction, const T margin);
+};
+
+template<typename T>
+bool MathUtil::WithinFractionOrMargin(const T x, const T y,
+                                      const T fraction, const T margin) {
+  // Written as (T(0) < fraction || T(0) == fraction) rather than "0 <= fraction"
+  // to avoid an always-true comparison warning when T is unsigned.
+  GOOGLE_DCHECK((T(0) < fraction || T(0) == fraction) &&
+         fraction < T(1) &&
+         margin >= T(0));
+
+  // Template specialization will convert the if() condition to a constant,
+  // which will cause the compiler to generate code for only the "then"
+  // branch or only the "else" branch.  In this way we avoid a compiler warning
+  // about a potential integer overflow in crosstool v12 (gcc 4.3.1).
+  if (MathLimits<T>::kIsInteger) {
+    return x == y;
+  } else {
+    // IsFinite checks are to make kPosInf and kNegInf not within fraction
+    if (!MathLimits<T>::IsFinite(x) && !MathLimits<T>::IsFinite(y)) {
+      return false;
+    }
+    T relative_margin = static_cast<T>(fraction * Max(Abs(x), Abs(y)));
+    return AbsDiff(x, y) <= Max(margin, relative_margin);
+  }
+}
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_MATHUTIL_H_
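
A minimal usage sketch of the MathUtil helpers added above, assuming only the
declarations in this hunk; the main() program and the literal values are
illustrative, not part of protobuf.

// Illustrative sketch, not part of the protobuf sources.
#include <iostream>

#include <google/protobuf/stubs/mathutil.h>

int main() {
  using google::protobuf::MathUtil;

  // AlmostEquals tolerates a tiny floating-point error (32 * DBL_EPSILON for
  // doubles), so the classic 0.1 + 0.2 vs 0.3 comparison succeeds.
  std::cout << MathUtil::AlmostEquals(0.1 + 0.2, 0.3) << "\n";  // prints 1

  // Mirrors the example in the WithinFractionOrMargin comment: the relative
  // test alone would fail, but the absolute margin of 1e-5 makes it pass.
  std::cout << MathUtil::WithinFractionOrMargin(0.0, 1e-10, 1e-5, 1e-5)
            << "\n";  // prints 1
  return 0;
}
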
diff --git a/src/google/protobuf/stubs/mutex.h b/src/google/protobuf/stubs/mutex.h
new file mode 100644
index 0000000..7ef1cb6
--- /dev/null
+++ b/src/google/protobuf/stubs/mutex.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_MUTEX_H_
+#define GOOGLE_PROTOBUF_STUBS_MUTEX_H_
+
+#ifdef GOOGLE_PROTOBUF_NO_THREADLOCAL
+#include <pthread.h>
+#endif
+
+#include <google/protobuf/stubs/macros.h>
+
+// ===================================================================
+// emulates google3/base/mutex.h
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// A Mutex is a non-reentrant (aka non-recursive) mutex.  At most one thread T
+// may hold a mutex at a given time.  If T attempts to Lock() the same Mutex
+// while holding it, T will deadlock.
+class LIBPROTOBUF_EXPORT Mutex {
+ public:
+  // Create a Mutex that is not held by anybody.
+  Mutex();
+
+  // Destructor
+  ~Mutex();
+
+  // Block if necessary until this Mutex is free, then acquire it exclusively.
+  void Lock();
+
+  // Release this Mutex.  Caller must hold it exclusively.
+  void Unlock();
+
+  // Crash if this Mutex is not held exclusively by this thread.
+  // May fail to crash when it should; will never crash when it should not.
+  void AssertHeld();
+
+ private:
+  struct Internal;
+  Internal* mInternal;
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Mutex);
+};
+
+// Undefine these macros to work around conflicts with the Google-internal
+// MutexLock implementation.
+// TODO(liujisi): Remove the undef once internal macros are removed.
+#undef MutexLock
+#undef ReaderMutexLock
+#undef WriterMutexLock
+#undef MutexLockMaybe
+
+// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
+class LIBPROTOBUF_EXPORT MutexLock {
+ public:
+  explicit MutexLock(Mutex *mu) : mu_(mu) { this->mu_->Lock(); }
+  ~MutexLock() { this->mu_->Unlock(); }
+ private:
+  Mutex *const mu_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLock);
+};
+
+// TODO(kenton):  Implement these?  Hard to implement portably.
+typedef MutexLock ReaderMutexLock;
+typedef MutexLock WriterMutexLock;
+
+// MutexLockMaybe is like MutexLock, but is a no-op when mu is NULL.
+class LIBPROTOBUF_EXPORT MutexLockMaybe {
+ public:
+  explicit MutexLockMaybe(Mutex *mu) :
+    mu_(mu) { if (this->mu_ != NULL) { this->mu_->Lock(); } }
+  ~MutexLockMaybe() { if (this->mu_ != NULL) { this->mu_->Unlock(); } }
+ private:
+  Mutex *const mu_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLockMaybe);
+};
+
+#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
+template<typename T>
+class ThreadLocalStorage {
+ public:
+  ThreadLocalStorage() {
+    pthread_key_create(&key_, &ThreadLocalStorage::Delete);
+  }
+  ~ThreadLocalStorage() {
+    pthread_key_delete(key_);
+  }
+  T* Get() {
+    T* result = static_cast<T*>(pthread_getspecific(key_));
+    if (result == NULL) {
+      result = new T();
+      pthread_setspecific(key_, result);
+    }
+    return result;
+  }
+ private:
+  static void Delete(void* value) {
+    delete static_cast<T*>(value);
+  }
+  pthread_key_t key_;
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadLocalStorage);
+};
+#endif
+
+}  // namespace internal
+
+// We made these internal so that they would show up as such in the docs,
+// but we don't want to stick "internal::" in front of them everywhere.
+using internal::Mutex;
+using internal::MutexLock;
+using internal::ReaderMutexLock;
+using internal::WriterMutexLock;
+using internal::MutexLockMaybe;
+
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_MUTEX_H_
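
A minimal sketch of how the RAII wrappers added above are meant to be used,
assuming only the Mutex, MutexLock, and MutexLockMaybe declarations in this
hunk; the counter and the function names are illustrative.

// Illustrative sketch, not part of the protobuf sources.
#include <google/protobuf/stubs/mutex.h>

namespace {

google::protobuf::Mutex counter_mutex;
int counter = 0;

void Increment() {
  // The lock is acquired in MutexLock's constructor and released in its
  // destructor, so every return path leaves counter_mutex unlocked.
  google::protobuf::MutexLock lock(&counter_mutex);
  ++counter;
}

// Callers pass &counter_mutex, or NULL when running single-threaded.
void MaybeIncrement(google::protobuf::Mutex* mu) {
  // MutexLockMaybe is the same idea, but a no-op when mu is NULL.
  google::protobuf::MutexLockMaybe lock(mu);
  ++counter;
}

}  // namespace
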
diff --git a/src/google/protobuf/stubs/once.cc b/src/google/protobuf/stubs/once.cc
new file mode 100644
index 0000000..889c647
--- /dev/null
+++ b/src/google/protobuf/stubs/once.cc
@@ -0,0 +1,99 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by internal .cc files and
+// generated .pb.cc files.  Users should not use this directly.
+
+#include <google/protobuf/stubs/once.h>
+
+#ifndef GOOGLE_PROTOBUF_NO_THREAD_SAFETY
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include <google/protobuf/stubs/atomicops.h>
+
+namespace google {
+namespace protobuf {
+
+namespace {
+
+void SchedYield() {
+#ifdef _WIN32
+  Sleep(0);
+#else  // POSIX
+  sched_yield();
+#endif
+}
+
+}  // namespace
+
+void GoogleOnceInitImpl(ProtobufOnceType* once, Closure* closure) {
+  internal::AtomicWord state = internal::Acquire_Load(once);
+  // Fast path. The provided closure was already executed.
+  if (state == ONCE_STATE_DONE) {
+    return;
+  }
+  // The closure execution did not complete yet. The once object can be in one
+  // of the two following states:
+  //   - UNINITIALIZED: We are the first thread calling this function.
+  //   - EXECUTING_CLOSURE: Another thread is already executing the closure.
+  //
+  // First, try to change the state from UNINITIALIZED to EXECUTING_CLOSURE
+  // atomically.
+  state = internal::Acquire_CompareAndSwap(
+      once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_CLOSURE);
+  if (state == ONCE_STATE_UNINITIALIZED) {
+    // We are the first thread to call this function, so we have to call the
+    // closure.
+    closure->Run();
+    internal::Release_Store(once, ONCE_STATE_DONE);
+  } else {
+    // Another thread has already started executing the closure. We need to
+    // wait until it completes the initialization.
+    while (state == ONCE_STATE_EXECUTING_CLOSURE) {
+      // Note that futex() could be used here on Linux as an improvement.
+      SchedYield();
+      state = internal::Acquire_Load(once);
+    }
+  }
+}
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_NO_THREAD_SAFETY
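
The same three-state protocol, sketched with C++11 std::atomic to make the
acquire/release pairing and the spin-yield wait explicit.  This is an
editorial illustration under the assumption of a C++11 compiler, not code
from the library; all names are illustrative.

// Illustrative sketch, not part of the protobuf sources.
#include <atomic>
#include <thread>

enum OnceState { kUninitialized = 0, kExecutingClosure = 1, kDone = 2 };

inline void CallOnceSketch(std::atomic<int>* once, void (*init_func)()) {
  // Fast path: an acquire load of kDone makes everything the winning thread
  // wrote during initialization visible to this thread.
  if (once->load(std::memory_order_acquire) == kDone) return;

  int expected = kUninitialized;
  if (once->compare_exchange_strong(expected, kExecutingClosure,
                                    std::memory_order_acquire)) {
    // We won the race: run the initializer, then publish its effects with a
    // release store before flipping the state to kDone.
    init_func();
    once->store(kDone, std::memory_order_release);
  } else {
    // Another thread is running the initializer; yield instead of spinning
    // hot, exactly as GoogleOnceInitImpl does with Sleep(0)/sched_yield().
    while (once->load(std::memory_order_acquire) != kDone) {
      std::this_thread::yield();
    }
  }
}
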
diff --git a/src/google/protobuf/stubs/once.h b/src/google/protobuf/stubs/once.h
new file mode 100644
index 0000000..1f082c3
--- /dev/null
+++ b/src/google/protobuf/stubs/once.h
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by internal .cc files and
+// generated .pb.cc files.  Users should not use this directly.
+//
+// This is basically a portable version of pthread_once().
+//
+// This header declares:
+// * A type called ProtobufOnceType.
+// * A macro GOOGLE_PROTOBUF_DECLARE_ONCE() which declares a variable of type
+//   ProtobufOnceType.  This is the only legal way to declare such a variable.
+//   The macro may only be used at the global scope (you cannot create local or
+//   class member variables of this type).
+// * A function GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()).
+//   This function, when invoked multiple times given the same ProtobufOnceType
+//   object, will invoke init_func on the first call only, and will make sure
+//   none of the calls return before that first call to init_func has finished.
+// * The user can provide a parameter which GoogleOnceInit() forwards to the
+//   user-provided function when it is called. Usage example:
+//     int a = 10;
+//     GoogleOnceInit(&my_once, &MyFunctionExpectingIntArgument, &a);
+// * This implementation guarantees that ProtobufOnceType is a POD (i.e. no
+//   static initializer generated).
+//
+// This implements a way to perform lazy initialization.  It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+//   void Init();
+//   GOOGLE_PROTOBUF_DECLARE_ONCE(once_init);
+//
+//   // Calls Init() exactly once.
+//   void InitOnce() {
+//     GoogleOnceInit(&once_init, &Init);
+//   }
+//
+// Note that if GoogleOnceInit() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization.  In general this is a safe
+// assumption since people don't usually construct threads before main() starts,
+// but it is technically not guaranteed.  Unfortunately, Win32 provides no way
+// whatsoever to statically-initialize its synchronization primitives, so our
+// only choice is to assume that dynamic initialization is single-threaded.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_ONCE_H__
+#define GOOGLE_PROTOBUF_STUBS_ONCE_H__
+
+#include <google/protobuf/stubs/atomicops.h>
+#include <google/protobuf/stubs/callback.h>
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+
+#ifdef GOOGLE_PROTOBUF_NO_THREAD_SAFETY
+
+typedef bool ProtobufOnceType;
+
+#define GOOGLE_PROTOBUF_ONCE_INIT false
+
+inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {
+  if (!*once) {
+    *once = true;
+    init_func();
+  }
+}
+
+template <typename Arg>
+inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)(Arg),
+    Arg arg) {
+  if (!*once) {
+    *once = true;
+    init_func(arg);
+  }
+}
+
+#else
+
+enum {
+  ONCE_STATE_UNINITIALIZED = 0,
+  ONCE_STATE_EXECUTING_CLOSURE = 1,
+  ONCE_STATE_DONE = 2
+};
+
+typedef internal::AtomicWord ProtobufOnceType;
+
+#define GOOGLE_PROTOBUF_ONCE_INIT ::google::protobuf::ONCE_STATE_UNINITIALIZED
+
+LIBPROTOBUF_EXPORT
+void GoogleOnceInitImpl(ProtobufOnceType* once, Closure* closure);
+
+inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {
+  if (internal::Acquire_Load(once) != ONCE_STATE_DONE) {
+    internal::FunctionClosure0 func(init_func, false);
+    GoogleOnceInitImpl(once, &func);
+  }
+}
+
+template <typename Arg>
+inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)(Arg*),
+    Arg* arg) {
+  if (internal::Acquire_Load(once) != ONCE_STATE_DONE) {
+    internal::FunctionClosure1<Arg*> func(init_func, false, arg);
+    GoogleOnceInitImpl(once, &func);
+  }
+}
+
+#endif  // GOOGLE_PROTOBUF_NO_THREAD_SAFETY
+
+class GoogleOnceDynamic {
+ public:
+  GoogleOnceDynamic() : state_(GOOGLE_PROTOBUF_ONCE_INIT) { }
+
+  // If this->Init() has not been called before by any thread,
+  // execute (*func_with_arg)(arg) then return.
+  // Otherwise, wait until that prior invocation has finished
+  // executing its function, then return.
+  template<typename T>
+  void Init(void (*func_with_arg)(T*), T* arg) {
+    GoogleOnceInit<T>(&this->state_,
+                      func_with_arg,
+                      arg);
+  }
+ private:
+  ProtobufOnceType state_;
+};
+
+#define GOOGLE_PROTOBUF_DECLARE_ONCE(NAME) \
+  ::google::protobuf::ProtobufOnceType NAME = GOOGLE_PROTOBUF_ONCE_INIT
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_ONCE_H__
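
A minimal sketch of the usage pattern described in the comment block above,
assuming only the GOOGLE_PROTOBUF_DECLARE_ONCE and GoogleOnceInit
declarations in this hunk; table_once, BuildTable, and GetTable are
illustrative names.

// Illustrative sketch, not part of the protobuf sources.
#include <google/protobuf/stubs/once.h>

namespace {

GOOGLE_PROTOBUF_DECLARE_ONCE(table_once);

int* lookup_table = NULL;

void BuildTable() {
  lookup_table = new int[256];
  for (int i = 0; i < 256; ++i) lookup_table[i] = i * i;
}

// Safe to call from any number of threads: BuildTable() runs exactly once,
// and no caller returns before it has finished.
const int* GetTable() {
  ::google::protobuf::GoogleOnceInit(&table_once, &BuildTable);
  return lookup_table;
}

}  // namespace
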
diff --git a/src/google/protobuf/stubs/once_unittest.cc b/src/google/protobuf/stubs/once_unittest.cc
new file mode 100644
index 0000000..37def58
--- /dev/null
+++ b/src/google/protobuf/stubs/once_unittest.cc
@@ -0,0 +1,255 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#include <pthread.h>
+#endif
+
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+using internal::NewCallback;
+namespace {
+
+class OnceInitTest : public testing::Test {
+ protected:
+  void SetUp() {
+    state_ = INIT_NOT_STARTED;
+    current_test_ = this;
+  }
+
+  // Since ProtobufOnceType is only allowed to be allocated in static storage,
+  // each test must use a different pair of ProtobufOnceType objects which it
+  // must declare itself.
+  void SetOnces(ProtobufOnceType* once, ProtobufOnceType* recursive_once) {
+    once_ = once;
+    recursive_once_ = recursive_once;
+  }
+
+  void InitOnce() {
+    GoogleOnceInit(once_, &InitStatic);
+  }
+  void InitRecursiveOnce() {
+    GoogleOnceInit(recursive_once_, &InitRecursiveStatic);
+  }
+
+  void BlockInit() { init_blocker_.Lock(); }
+  void UnblockInit() { init_blocker_.Unlock(); }
+
+  class TestThread {
+   public:
+    TestThread(Closure* callback)
+        : done_(false), joined_(false), callback_(callback) {
+#ifdef _WIN32
+      thread_ = CreateThread(NULL, 0, &Start, this, 0, NULL);
+#else
+      pthread_create(&thread_, NULL, &Start, this);
+#endif
+    }
+    ~TestThread() {
+      if (!joined_) Join();
+    }
+
+    bool IsDone() {
+      MutexLock lock(&done_mutex_);
+      return done_;
+    }
+    void Join() {
+      joined_ = true;
+#ifdef _WIN32
+      WaitForSingleObject(thread_, INFINITE);
+      CloseHandle(thread_);
+#else
+      pthread_join(thread_, NULL);
+#endif
+    }
+
+   private:
+#ifdef _WIN32
+    HANDLE thread_;
+#else
+    pthread_t thread_;
+#endif
+
+    Mutex done_mutex_;
+    bool done_;
+    bool joined_;
+    Closure* callback_;
+
+#ifdef _WIN32
+    static DWORD WINAPI Start(LPVOID arg) {
+#else
+    static void* Start(void* arg) {
+#endif
+      reinterpret_cast<TestThread*>(arg)->Run();
+      return 0;
+    }
+
+    void Run() {
+      callback_->Run();
+      MutexLock lock(&done_mutex_);
+      done_ = true;
+    }
+  };
+
+  TestThread* RunInitOnceInNewThread() {
+    return new TestThread(internal::NewCallback(this, &OnceInitTest::InitOnce));
+  }
+  TestThread* RunInitRecursiveOnceInNewThread() {
+    return new TestThread(
+        internal::NewCallback(this, &OnceInitTest::InitRecursiveOnce));
+  }
+
+  enum State {
+    INIT_NOT_STARTED,
+    INIT_STARTED,
+    INIT_DONE
+  };
+  State CurrentState() {
+    MutexLock lock(&mutex_);
+    return state_;
+  }
+
+  void WaitABit() {
+#ifdef _WIN32
+    Sleep(1000);
+#else
+    sleep(1);
+#endif
+  }
+
+ private:
+  Mutex mutex_;
+  Mutex init_blocker_;
+  State state_;
+  ProtobufOnceType* once_;
+  ProtobufOnceType* recursive_once_;
+
+  void Init() {
+    MutexLock lock(&mutex_);
+    EXPECT_EQ(INIT_NOT_STARTED, state_);
+    state_ = INIT_STARTED;
+    mutex_.Unlock();
+    init_blocker_.Lock();
+    init_blocker_.Unlock();
+    mutex_.Lock();
+    state_ = INIT_DONE;
+  }
+
+  static OnceInitTest* current_test_;
+  static void InitStatic() { current_test_->Init(); }
+  static void InitRecursiveStatic() { current_test_->InitOnce(); }
+};
+
+OnceInitTest* OnceInitTest::current_test_ = NULL;
+
+GOOGLE_PROTOBUF_DECLARE_ONCE(simple_once);
+
+TEST_F(OnceInitTest, Simple) {
+  SetOnces(&simple_once, NULL);
+
+  EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
+  InitOnce();
+  EXPECT_EQ(INIT_DONE, CurrentState());
+
+  // Calling again has no effect.
+  InitOnce();
+  EXPECT_EQ(INIT_DONE, CurrentState());
+}
+
+GOOGLE_PROTOBUF_DECLARE_ONCE(recursive_once1);
+GOOGLE_PROTOBUF_DECLARE_ONCE(recursive_once2);
+
+TEST_F(OnceInitTest, Recursive) {
+  SetOnces(&recursive_once1, &recursive_once2);
+
+  EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
+  InitRecursiveOnce();
+  EXPECT_EQ(INIT_DONE, CurrentState());
+}
+
+GOOGLE_PROTOBUF_DECLARE_ONCE(multiple_threads_once);
+
+TEST_F(OnceInitTest, MultipleThreads) {
+  SetOnces(&multiple_threads_once, NULL);
+
+  scoped_ptr<TestThread> threads[4];
+  EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
+  for (int i = 0; i < 4; i++) {
+    threads[i].reset(RunInitOnceInNewThread());
+  }
+  for (int i = 0; i < 4; i++) {
+    threads[i]->Join();
+  }
+  EXPECT_EQ(INIT_DONE, CurrentState());
+}
+
+GOOGLE_PROTOBUF_DECLARE_ONCE(multiple_threads_blocked_once1);
+GOOGLE_PROTOBUF_DECLARE_ONCE(multiple_threads_blocked_once2);
+
+TEST_F(OnceInitTest, MultipleThreadsBlocked) {
+  SetOnces(&multiple_threads_blocked_once1, &multiple_threads_blocked_once2);
+
+  scoped_ptr<TestThread> threads[8];
+  EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
+
+  BlockInit();
+  for (int i = 0; i < 4; i++) {
+    threads[i].reset(RunInitOnceInNewThread());
+  }
+  for (int i = 4; i < 8; i++) {
+    threads[i].reset(RunInitRecursiveOnceInNewThread());
+  }
+
+  WaitABit();
+
+  // We should now have one thread blocked inside Init(), four blocked waiting
+  // for Init() to complete, and three blocked waiting for InitRecursive() to
+  // complete.
+  EXPECT_EQ(INIT_STARTED, CurrentState());
+  UnblockInit();
+
+  for (int i = 0; i < 8; i++) {
+    threads[i]->Join();
+  }
+  EXPECT_EQ(INIT_DONE, CurrentState());
+}
+
+}  // anonymous namespace
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/platform_macros.h b/src/google/protobuf/stubs/platform_macros.h
new file mode 100644
index 0000000..22b3572
--- /dev/null
+++ b/src/google/protobuf/stubs/platform_macros.h
@@ -0,0 +1,122 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_PLATFORM_MACROS_H_
+#define GOOGLE_PROTOBUF_PLATFORM_MACROS_H_
+
+#define GOOGLE_PROTOBUF_PLATFORM_ERROR \
+#error "Host platform was not detected as supported by protobuf"
+
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define GOOGLE_PROTOBUF_ARCH_X64 1
+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define GOOGLE_PROTOBUF_ARCH_IA32 1
+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+#elif defined(__QNX__)
+#define GOOGLE_PROTOBUF_ARCH_ARM_QNX 1
+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+#elif defined(__ARMEL__)
+#define GOOGLE_PROTOBUF_ARCH_ARM 1
+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+#elif defined(__aarch64__)
+#define GOOGLE_PROTOBUF_ARCH_AARCH64 1
+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+#elif defined(__MIPSEL__)
+#if defined(__LP64__)
+#define GOOGLE_PROTOBUF_ARCH_MIPS64 1
+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+#else
+#define GOOGLE_PROTOBUF_ARCH_MIPS 1
+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+#endif
+#elif defined(__pnacl__)
+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+#elif defined(sparc)
+#define GOOGLE_PROTOBUF_ARCH_SPARC 1
+#if defined(__sparc_v9__) || defined(__sparcv9) || defined(__arch64__)
+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+#else
+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+#endif
+#elif defined(_POWER) || defined(__powerpc64__) || defined(__PPC64__)
+#define GOOGLE_PROTOBUF_ARCH_POWER 1
+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+#elif defined(__GNUC__)
+# if (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))
+// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h
+# elif defined(__clang__)
+#  if !__has_extension(c_atomic)
+GOOGLE_PROTOBUF_PLATFORM_ERROR
+#  endif
+// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h
+# endif
+# if __LP64__
+#  define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+# else
+#  define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+# endif
+#else
+GOOGLE_PROTOBUF_PLATFORM_ERROR
+#endif
+
+#if defined(__APPLE__)
+#define GOOGLE_PROTOBUF_OS_APPLE
+#include <TargetConditionals.h>
+#if TARGET_OS_IPHONE
+#define GOOGLE_PROTOBUF_OS_IPHONE
+#endif
+#elif defined(__EMSCRIPTEN__)
+#define GOOGLE_PROTOBUF_OS_EMSCRIPTEN
+#elif defined(__native_client__)
+#define GOOGLE_PROTOBUF_OS_NACL
+#elif defined(sun)
+#define GOOGLE_PROTOBUF_OS_SOLARIS
+#elif defined(_AIX)
+#define GOOGLE_PROTOBUF_OS_AIX
+#elif defined(__ANDROID__)
+#define GOOGLE_PROTOBUF_OS_ANDROID
+#endif
+
+#undef GOOGLE_PROTOBUF_PLATFORM_ERROR
+
+#if defined(GOOGLE_PROTOBUF_OS_ANDROID) || defined(GOOGLE_PROTOBUF_OS_IPHONE)
+// The Android NDK does not support the __thread keyword very well yet, so
+// we use pthread_key_create()/pthread_getspecific()/... for TLS support on
+// Android.  iOS also does not support the __thread keyword.
+#define GOOGLE_PROTOBUF_NO_THREADLOCAL
+#endif
+
+#endif  // GOOGLE_PROTOBUF_PLATFORM_MACROS_H_
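
A sketch of how the GOOGLE_PROTOBUF_NO_THREADLOCAL macro defined above is
typically consumed, assuming the GOOGLE_THREAD_LOCAL macro from port.h and
internal::ThreadLocalStorage from mutex.h (both added elsewhere in this
change); the per-thread counter is illustrative.

// Illustrative sketch, not part of the protobuf sources.
#include <google/protobuf/stubs/platform_macros.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/mutex.h>

#ifdef GOOGLE_PROTOBUF_NO_THREADLOCAL
// Android/iOS path: emulate thread-local storage with pthread keys.
static google::protobuf::internal::ThreadLocalStorage<int> per_thread_count;
inline int* GetPerThreadCount() { return per_thread_count.Get(); }
#else
// Everywhere else: use the compiler's thread-local support directly.
static GOOGLE_THREAD_LOCAL int per_thread_count = 0;
inline int* GetPerThreadCount() { return &per_thread_count; }
#endif
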
diff --git a/src/google/protobuf/stubs/port.h b/src/google/protobuf/stubs/port.h
new file mode 100644
index 0000000..1036dff
--- /dev/null
+++ b/src/google/protobuf/stubs/port.h
@@ -0,0 +1,384 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_PORT_H_
+#define GOOGLE_PROTOBUF_STUBS_PORT_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include <cstddef>
+#include <string>
+#include <string.h>
+#if defined(__osf__)
+// Tru64 lacks stdint.h, but has inttypes.h which defines a superset of
+// what stdint.h would define.
+#include <inttypes.h>
+#elif !defined(_MSC_VER)
+#include <stdint.h>
+#endif
+
+#undef PROTOBUF_LITTLE_ENDIAN
+#ifdef _WIN32
+  // Assuming Windows is always little-endian.
+  // TODO(xiaofeng): PROTOBUF_LITTLE_ENDIAN is not only used for
+  // optimization but also for correctness.  We should define a
+  // different macro to test the big-endian code path in coded_stream.
+  #if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
+    #define PROTOBUF_LITTLE_ENDIAN 1
+  #endif
+  #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
+    // If MSVC has "/RTCc" set, it will complain about truncating casts at
+    // runtime.  This file contains some intentional truncating casts.
+    #pragma runtime_checks("c", off)
+  #endif
+#else
+  #include <sys/param.h>   // __BYTE_ORDER
+  #if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
+         (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN)) && \
+      !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
+    #define PROTOBUF_LITTLE_ENDIAN 1
+  #endif
+#endif
+#if defined(_MSC_VER) && defined(PROTOBUF_USE_DLLS)
+  #ifdef LIBPROTOBUF_EXPORTS
+    #define LIBPROTOBUF_EXPORT __declspec(dllexport)
+  #else
+    #define LIBPROTOBUF_EXPORT __declspec(dllimport)
+  #endif
+  #ifdef LIBPROTOC_EXPORTS
+    #define LIBPROTOC_EXPORT   __declspec(dllexport)
+  #else
+    #define LIBPROTOC_EXPORT   __declspec(dllimport)
+  #endif
+#else
+  #define LIBPROTOBUF_EXPORT
+  #define LIBPROTOC_EXPORT
+#endif
+
+// These #includes are for the byte swap functions declared later on.
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__APPLE__)
+#include <libkern/OSByteOrder.h>
+#elif defined(__GLIBC__) || defined(__CYGWIN__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+// ===================================================================
+// from google3/base/port.h
+namespace google {
+namespace protobuf {
+
+typedef unsigned int uint;
+
+#ifdef _MSC_VER
+typedef signed __int8  int8;
+typedef __int16 int16;
+typedef __int32 int32;
+typedef __int64 int64;
+
+typedef unsigned __int8  uint8;
+typedef unsigned __int16 uint16;
+typedef unsigned __int32 uint32;
+typedef unsigned __int64 uint64;
+#else
+typedef signed char  int8;
+typedef short int16;
+typedef int int32;
+typedef long long int64;
+
+typedef unsigned char  uint8;
+typedef unsigned short uint16;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+#endif
+
+// Macros for long long literals and format-string size specifiers, needed
+// because gcc and vc++ use different suffixes and different size specifiers.
+#undef GOOGLE_LONGLONG
+#undef GOOGLE_ULONGLONG
+#undef GOOGLE_LL_FORMAT
+
+#ifdef _MSC_VER
+#define GOOGLE_LONGLONG(x) x##I64
+#define GOOGLE_ULONGLONG(x) x##UI64
+#define GOOGLE_LL_FORMAT "I64"  // As in printf("%I64d", ...)
+#else
+#define GOOGLE_LONGLONG(x) x##LL
+#define GOOGLE_ULONGLONG(x) x##ULL
+#define GOOGLE_LL_FORMAT "ll"  // As in "%lld". Note that "q" is poor form also.
+#endif
+
+static const int32 kint32max = 0x7FFFFFFF;
+static const int32 kint32min = -kint32max - 1;
+static const int64 kint64max = GOOGLE_LONGLONG(0x7FFFFFFFFFFFFFFF);
+static const int64 kint64min = -kint64max - 1;
+static const uint32 kuint32max = 0xFFFFFFFFu;
+static const uint64 kuint64max = GOOGLE_ULONGLONG(0xFFFFFFFFFFFFFFFF);
+
+// -------------------------------------------------------------------
+// Annotations:  Some parts of the code have been annotated in ways that might
+//   be useful to some compilers or tools, but are not supported universally.
+//   You can #define these annotations yourself if the default implementation
+//   is not right for you.
+
+#ifndef GOOGLE_ATTRIBUTE_ALWAYS_INLINE
+#if defined(__GNUC__) && (__GNUC__ > 3 ||(__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+// For functions we want to force inline.
+// Introduced in gcc 3.1.
+#define GOOGLE_ATTRIBUTE_ALWAYS_INLINE __attribute__ ((always_inline))
+#else
+// Other compilers will have to figure it out for themselves.
+#define GOOGLE_ATTRIBUTE_ALWAYS_INLINE
+#endif
+#endif
+
+#ifndef GOOGLE_ATTRIBUTE_NOINLINE
+#if defined(__GNUC__) && (__GNUC__ > 3 ||(__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+// For functions we want to force not inline.
+// Introduced in gcc 3.1.
+#define GOOGLE_ATTRIBUTE_NOINLINE __attribute__ ((noinline))
+#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
+// Seems to have been around since at least Visual Studio 2005
+#define GOOGLE_ATTRIBUTE_NOINLINE __declspec(noinline)
+#else
+// Other compilers will have to figure it out for themselves.
+#define GOOGLE_ATTRIBUTE_NOINLINE
+#endif
+#endif
+
+#ifndef GOOGLE_ATTRIBUTE_DEPRECATED
+#ifdef __GNUC__
+// If the method/variable/type is used anywhere, produce a warning.
+#define GOOGLE_ATTRIBUTE_DEPRECATED __attribute__((deprecated))
+#else
+#define GOOGLE_ATTRIBUTE_DEPRECATED
+#endif
+#endif
+
+#ifndef GOOGLE_PREDICT_TRUE
+#ifdef __GNUC__
+// Provided at least since GCC 3.0.
+#define GOOGLE_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define GOOGLE_PREDICT_TRUE(x) (x)
+#endif
+#endif
+
+#ifndef GOOGLE_PREDICT_FALSE
+#ifdef __GNUC__
+// Provided at least since GCC 3.0.
+#define GOOGLE_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#else
+#define GOOGLE_PREDICT_FALSE(x) (x)
+#endif
+#endif
+
+// Delimits a block of code which may write to memory which is simultaneously
+// written by other threads, but which has been determined to be thread-safe
+// (e.g. because it is an idempotent write).
+#ifndef GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN
+#define GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN()
+#endif
+#ifndef GOOGLE_SAFE_CONCURRENT_WRITES_END
+#define GOOGLE_SAFE_CONCURRENT_WRITES_END()
+#endif
+
+#if defined(__clang__) && defined(__has_cpp_attribute) \
+    && !defined(GOOGLE_PROTOBUF_OS_APPLE)
+# if defined(GOOGLE_PROTOBUF_OS_NACL) || defined(EMSCRIPTEN) || \
+     __has_cpp_attribute(clang::fallthrough)
+#  define GOOGLE_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+# endif
+#endif
+
+#ifndef GOOGLE_FALLTHROUGH_INTENDED
+# define GOOGLE_FALLTHROUGH_INTENDED
+#endif
+
+#define GOOGLE_GUARDED_BY(x)
+#define GOOGLE_ATTRIBUTE_COLD
+
+// x86 and x86-64 can perform unaligned loads/stores directly.
+#if defined(_M_X64) || defined(__x86_64__) || \
+    defined(_M_IX86) || defined(__i386__)
+
+#define GOOGLE_UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define GOOGLE_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+#define GOOGLE_UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+#define GOOGLE_UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define GOOGLE_UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+#define GOOGLE_UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+#else
+inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) {
+  uint16 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) {
+  uint32 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) {
+  uint64 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) {
+  memcpy(p, &v, sizeof v);
+}
+#endif
+
+#if defined(_MSC_VER)
+#define GOOGLE_THREAD_LOCAL __declspec(thread)
+#else
+#define GOOGLE_THREAD_LOCAL __thread
+#endif
+
+// The following guarantees declaration of the byte swap functions, and
+// defines __BYTE_ORDER for MSVC
+#ifdef _MSC_VER
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#define bswap_16(x) _byteswap_ushort(x)
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#define bswap_16(x) OSSwapInt16(x)
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#elif !defined(__GLIBC__) && !defined(__CYGWIN__)
+
+static inline uint16 bswap_16(uint16 x) {
+  return static_cast<uint16>(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8));
+}
+#define bswap_16(x) bswap_16(x)
+static inline uint32 bswap_32(uint32 x) {
+  return (((x & 0xFF) << 24) |
+          ((x & 0xFF00) << 8) |
+          ((x & 0xFF0000) >> 8) |
+          ((x & 0xFF000000) >> 24));
+}
+#define bswap_32(x) bswap_32(x)
+static inline uint64 bswap_64(uint64 x) {
+  return (((x & GOOGLE_ULONGLONG(0xFF)) << 56) |
+          ((x & GOOGLE_ULONGLONG(0xFF00)) << 40) |
+          ((x & GOOGLE_ULONGLONG(0xFF0000)) << 24) |
+          ((x & GOOGLE_ULONGLONG(0xFF000000)) << 8) |
+          ((x & GOOGLE_ULONGLONG(0xFF00000000)) >> 8) |
+          ((x & GOOGLE_ULONGLONG(0xFF0000000000)) >> 24) |
+          ((x & GOOGLE_ULONGLONG(0xFF000000000000)) >> 40) |
+          ((x & GOOGLE_ULONGLONG(0xFF00000000000000)) >> 56));
+}
+#define bswap_64(x) bswap_64(x)
+
+#endif
+
+// ===================================================================
+// from google3/util/endian/endian.h
+LIBPROTOBUF_EXPORT uint32 ghtonl(uint32 x);
+
+class BigEndian {
+ public:
+#ifdef PROTOBUF_LITTLE_ENDIAN
+
+  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+  static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+
+  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+  static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+
+  static uint64 FromHost64(uint64 x) { return bswap_64(x); }
+  static uint64 ToHost64(uint64 x) { return bswap_64(x); }
+
+  static bool IsLittleEndian() { return true; }
+
+#else
+
+  static uint16 FromHost16(uint16 x) { return x; }
+  static uint16 ToHost16(uint16 x) { return x; }
+
+  static uint32 FromHost32(uint32 x) { return x; }
+  static uint32 ToHost32(uint32 x) { return x; }
+
+  static uint64 FromHost64(uint64 x) { return x; }
+  static uint64 ToHost64(uint64 x) { return x; }
+
+  static bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+  // Functions to do unaligned loads and stores in big-endian order.
+  static uint16 Load16(const void *p) {
+    return ToHost16(GOOGLE_UNALIGNED_LOAD16(p));
+  }
+
+  static void Store16(void *p, uint16 v) {
+    GOOGLE_UNALIGNED_STORE16(p, FromHost16(v));
+  }
+
+  static uint32 Load32(const void *p) {
+    return ToHost32(GOOGLE_UNALIGNED_LOAD32(p));
+  }
+
+  static void Store32(void *p, uint32 v) {
+    GOOGLE_UNALIGNED_STORE32(p, FromHost32(v));
+  }
+
+  static uint64 Load64(const void *p) {
+    return ToHost64(GOOGLE_UNALIGNED_LOAD64(p));
+  }
+
+  static void Store64(void *p, uint64 v) {
+    GOOGLE_UNALIGNED_STORE64(p, FromHost64(v));
+  }
+};
+
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_PORT_H_
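
A small round-trip sketch for the BigEndian helpers declared above, assuming
only this header; it behaves identically on little- and big-endian hosts.

// Illustrative sketch, not part of the protobuf sources.
#include <assert.h>

#include <google/protobuf/stubs/port.h>

void BigEndianRoundTrip() {
  using google::protobuf::BigEndian;
  using google::protobuf::uint32;
  using google::protobuf::uint8;

  uint8 buffer[4];
  const uint32 value = 0x01020304u;

  // Store32 converts to big-endian (network) order before writing, so the
  // buffer holds 01 02 03 04 regardless of the host's byte order.
  BigEndian::Store32(buffer, value);
  assert(buffer[0] == 0x01 && buffer[3] == 0x04);

  // Load32 converts back to host order.
  assert(BigEndian::Load32(buffer) == value);
}
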
diff --git a/src/google/protobuf/stubs/scoped_ptr.h b/src/google/protobuf/stubs/scoped_ptr.h
new file mode 100644
index 0000000..4423c11
--- /dev/null
+++ b/src/google/protobuf/stubs/scoped_ptr.h
@@ -0,0 +1,236 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_SCOPED_PTR_H_
+#define GOOGLE_PROTOBUF_STUBS_SCOPED_PTR_H_
+
+#include <google/protobuf/stubs/port.h>
+
+namespace google {
+namespace protobuf {
+
+// ===================================================================
+// from google3/base/scoped_ptr.h
+
+namespace internal {
+
+//  This is an implementation designed to match the anticipated future TR2
+//  implementation of the scoped_ptr class, and its closely-related brethren,
+//  scoped_array, scoped_ptr_malloc, and make_scoped_ptr.
+
+template <class C> class scoped_ptr;
+template <class C> class scoped_array;
+
+// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
+// automatically deletes the pointer it holds (if any).
+// That is, scoped_ptr<T> owns the T object that it points to.
+// Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to a T object.
+//
+// The size of a scoped_ptr is small:
+// sizeof(scoped_ptr<C>) == sizeof(C*)
+template <class C>
+class scoped_ptr {
+ public:
+
+  // The element type
+  typedef C element_type;
+
+  // Constructor.  Defaults to initializing with NULL.
+  // There is no way to create an uninitialized scoped_ptr.
+  // The input parameter must be allocated with new.
+  explicit scoped_ptr(C* p = NULL) : ptr_(p) { }
+
+  // Destructor.  If there is a C object, delete it.
+  // We don't need to test ptr_ == NULL because C++ does that for us.
+  ~scoped_ptr() {
+    enum { type_must_be_complete = sizeof(C) };
+    delete ptr_;
+  }
+
+  // Reset.  Deletes the current owned object, if any.
+  // Then takes ownership of a new object, if given.
+  // this->reset(this->get()) works.
+  void reset(C* p = NULL) {
+    if (p != ptr_) {
+      enum { type_must_be_complete = sizeof(C) };
+      delete ptr_;
+      ptr_ = p;
+    }
+  }
+
+  // Accessors to get the owned object.
+  // operator* and operator-> will assert() if there is no current object.
+  C& operator*() const {
+    assert(ptr_ != NULL);
+    return *ptr_;
+  }
+  C* operator->() const  {
+    assert(ptr_ != NULL);
+    return ptr_;
+  }
+  C* get() const { return ptr_; }
+
+  // Comparison operators.
+  // These return whether two scoped_ptr refer to the same object, not just to
+  // two different but equal objects.
+  bool operator==(C* p) const { return ptr_ == p; }
+  bool operator!=(C* p) const { return ptr_ != p; }
+
+  // Swap two scoped pointers.
+  void swap(scoped_ptr& p2) {
+    C* tmp = ptr_;
+    ptr_ = p2.ptr_;
+    p2.ptr_ = tmp;
+  }
+
+  // Release a pointer.
+  // The return value is the current pointer held by this object.
+  // If this object holds a NULL pointer, the return value is NULL.
+  // After this operation, this object will hold a NULL pointer,
+  // and will not own the object any more.
+  C* release() {
+    C* retVal = ptr_;
+    ptr_ = NULL;
+    return retVal;
+  }
+
+ private:
+  C* ptr_;
+
+  // Forbid comparison of scoped_ptr types.  If C2 != C, it totally doesn't
+  // make sense, and if C2 == C, it still doesn't make sense because you should
+  // never have the same object owned by two different scoped_ptrs.
+  template <class C2> bool operator==(scoped_ptr<C2> const& p2) const;
+  template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const;
+
+  // Disallow evil constructors
+  scoped_ptr(const scoped_ptr&);
+  void operator=(const scoped_ptr&);
+};
+
+// scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate
+// with new [] and the destructor deletes objects with delete [].
+//
+// As with scoped_ptr<C>, a scoped_array<C> either points to an object
+// or is NULL.  A scoped_array<C> owns the object that it points to.
+//
+// Size: sizeof(scoped_array<C>) == sizeof(C*)
+template <class C>
+class scoped_array {
+ public:
+
+  // The element type
+  typedef C element_type;
+
+  // Constructor.  Defaults to initializing with NULL.
+  // There is no way to create an uninitialized scoped_array.
+  // The input parameter must be allocated with new [].
+  explicit scoped_array(C* p = NULL) : array_(p) { }
+
+  // Destructor.  If there is a C array, delete[] it.
+  // We don't need to test array_ == NULL because C++ does that for us.
+  ~scoped_array() {
+    enum { type_must_be_complete = sizeof(C) };
+    delete[] array_;
+  }
+
+  // Reset.  Deletes the current owned object, if any.
+  // Then takes ownership of a new object, if given.
+  // this->reset(this->get()) works.
+  void reset(C* p = NULL) {
+    if (p != array_) {
+      enum { type_must_be_complete = sizeof(C) };
+      delete[] array_;
+      array_ = p;
+    }
+  }
+
+  // Get one element of the current object.
+  // Will assert() if there is no current object, or index i is negative.
+  C& operator[](std::ptrdiff_t i) const {
+    assert(i >= 0);
+    assert(array_ != NULL);
+    return array_[i];
+  }
+
+  // Get a pointer to the zeroth element of the current object.
+  // If there is no current object, return NULL.
+  C* get() const {
+    return array_;
+  }
+
+  // Comparison operators.
+  // These return whether two scoped_array refer to the same object, not just to
+  // two different but equal objects.
+  bool operator==(C* p) const { return array_ == p; }
+  bool operator!=(C* p) const { return array_ != p; }
+
+  // Swap two scoped arrays.
+  void swap(scoped_array& p2) {
+    C* tmp = array_;
+    array_ = p2.array_;
+    p2.array_ = tmp;
+  }
+
+  // Release an array.
+  // The return value is the current pointer held by this object.
+  // If this object holds a NULL pointer, the return value is NULL.
+  // After this operation, this object will hold a NULL pointer,
+  // and will not own the object any more.
+  C* release() {
+    C* retVal = array_;
+    array_ = NULL;
+    return retVal;
+  }
+
+ private:
+  C* array_;
+
+  // Forbid comparison of different scoped_array types.
+  template <class C2> bool operator==(scoped_array<C2> const& p2) const;
+  template <class C2> bool operator!=(scoped_array<C2> const& p2) const;
+
+  // Disallow evil constructors
+  scoped_array(const scoped_array&);
+  void operator=(const scoped_array&);
+};
+
+}  // namespace internal
+
+// We made these internal so that they would show up as such in the docs,
+// but we don't want to stick "internal::" in front of them everywhere.
+using internal::scoped_ptr;
+using internal::scoped_array;
+
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_SCOPED_PTR_H_
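
A minimal sketch of scoped_ptr and scoped_array as declared above, assuming
only this header; the Foo type and the function are illustrative.

// Illustrative sketch, not part of the protobuf sources.
#include <google/protobuf/stubs/scoped_ptr.h>

struct Foo {
  int value;
  Foo() : value(42) {}
};

void ScopedPtrDemo() {
  // Owns one heap object; it is deleted automatically at end of scope.
  google::protobuf::scoped_ptr<Foo> foo(new Foo);
  int v = foo->value;        // operator-> asserts that foo is non-NULL.
  (void)v;

  // reset() deletes the old object (if any) and takes ownership of the new
  // one; release() hands ownership back to the caller.
  foo.reset(new Foo);
  Foo* raw = foo.release();  // foo now holds NULL and owns nothing.
  delete raw;

  // scoped_array is the same idea for arrays allocated with new[].
  google::protobuf::scoped_array<int> numbers(new int[16]);
  numbers[0] = 1;            // operator[] asserts that the array is non-NULL.
}
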
diff --git a/src/google/protobuf/stubs/shared_ptr.h b/src/google/protobuf/stubs/shared_ptr.h
new file mode 100644
index 0000000..d250bf4
--- /dev/null
+++ b/src/google/protobuf/stubs/shared_ptr.h
@@ -0,0 +1,470 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/util/gtl/shared_ptr.h
+
+#ifndef GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__
+#define GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__
+
+#include <google/protobuf/stubs/atomicops.h>
+
+#include <algorithm>  // for swap
+#include <stddef.h>
+#include <memory>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Alias to std::shared_ptr for any C++11 platform,
+// and for any supported MSVC compiler.
+#if !defined(UTIL_GTL_USE_STD_SHARED_PTR) && \
+    (defined(COMPILER_MSVC) || defined(LANG_CXX11))
+#define UTIL_GTL_USE_STD_SHARED_PTR 1
+#endif
+
+#if defined(UTIL_GTL_USE_STD_SHARED_PTR) && UTIL_GTL_USE_STD_SHARED_PTR
+
+// These are transitional.  They will be going away soon.
+// Please just #include <memory> and type std::shared_ptr yourself, instead
+// of relying on this file.
+//
+// Migration doc: http://go/std-shared-ptr-lsc
+using std::enable_shared_from_this;
+using std::shared_ptr;
+using std::static_pointer_cast;
+using std::weak_ptr;
+
+#else  // below, UTIL_GTL_USE_STD_SHARED_PTR not set or set to 0.
+
+// For everything else there is the google3 implementation.
+inline bool RefCountDec(volatile Atomic32 *ptr) {
+  return Barrier_AtomicIncrement(ptr, -1) != 0;
+}
+
+inline void RefCountInc(volatile Atomic32 *ptr) {
+  NoBarrier_AtomicIncrement(ptr, 1);
+}
+
+template <typename T> class shared_ptr;
+template <typename T> class weak_ptr;
+
+// This class is an internal implementation detail for shared_ptr. If two
+// shared_ptrs point to the same object, they also share a control block.
+// An "empty" shared_ptr refers to NULL and also has a NULL control block.
+// It contains all of the state that's needed for reference counting or any
+// other kind of resource management. In this implementation the control block
+// happens to consist of two atomic words, the reference count (the number
+// of shared_ptrs that share ownership of the object) and the weak count
+// (the number of weak_ptrs that observe the object, plus 1 if the
+// refcount is nonzero).
+//
+// The "plus 1" prevents a race condition in the shared_ptr and weak_ptr
+// destructors: the control block must be deleted exactly once, so at most
+// one object may see the weak count decremented from 1 to 0.
+class SharedPtrControlBlock {
+  template <typename T> friend class shared_ptr;
+  template <typename T> friend class weak_ptr;
+ private:
+  SharedPtrControlBlock() : refcount_(1), weak_count_(1) { }
+  Atomic32 refcount_;
+  Atomic32 weak_count_;
+};
+
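+// Illustrative sketch of how the two counts evolve under this scheme
+// (Foo is a hypothetical type):
+//
+//   shared_ptr<Foo> a(new Foo);  // refcount_ == 1, weak_count_ == 1
+//   shared_ptr<Foo> b = a;       // refcount_ == 2, weak_count_ == 1
+//   weak_ptr<Foo> w = a;         // refcount_ == 2, weak_count_ == 2
+//   b.reset();                   // refcount_ == 1, weak_count_ == 2
+//   a.reset();                   // refcount_ == 0: Foo deleted, weak_count_ == 1
+//   w.reset();                   // weak_count_ == 0: control block deleted
+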
+// Forward declaration. The class is defined below.
+template <typename T> class enable_shared_from_this;
+
+template <typename T>
+class shared_ptr {
+  template <typename U> friend class weak_ptr;
+ public:
+  typedef T element_type;
+
+  shared_ptr() : ptr_(NULL), control_block_(NULL) {}
+
+  explicit shared_ptr(T* ptr)
+      : ptr_(ptr),
+        control_block_(ptr != NULL ? new SharedPtrControlBlock : NULL) {
+    // If p is non-null and T inherits from enable_shared_from_this, we
+    // set up the data that shared_from_this needs.
+    MaybeSetupWeakThis(ptr);
+  }
+
+  // Copy constructor: makes this object a copy of ptr, and increments
+  // the reference count.
+  template <typename U>
+  shared_ptr(const shared_ptr<U>& ptr)
+      : ptr_(NULL),
+        control_block_(NULL) {
+    Initialize(ptr);
+  }
+  // Need a non-templated version to prevent the compiler-generated default
+  // copy constructor.
+  shared_ptr(const shared_ptr<T>& ptr)
+      : ptr_(NULL),
+        control_block_(NULL) {
+    Initialize(ptr);
+  }
+
+  // Assignment operator. Replaces the existing shared_ptr with ptr.
+  // Increments ptr's reference count and decrements the one being replaced.
+  template <typename U>
+  shared_ptr<T>& operator=(const shared_ptr<U>& ptr) {
+    if (ptr_ != ptr.ptr_) {
+      shared_ptr<T> me(ptr);   // will hold our previous state to be destroyed.
+      swap(me);
+    }
+    return *this;
+  }
+
+  // Need a non-templated version to prevent the compiler-generated default
+  // assignment operator.
+  shared_ptr<T>& operator=(const shared_ptr<T>& ptr) {
+    if (ptr_ != ptr.ptr_) {
+      shared_ptr<T> me(ptr);   // will hold our previous state to be destroyed.
+      swap(me);
+    }
+    return *this;
+  }
+
+  // TODO(austern): Consider providing this constructor. The draft C++ standard
+  // (20.8.10.2.1) includes it. However, it says that this constructor throws
+  // a bad_weak_ptr exception when ptr is expired. Is it better to provide this
+  // constructor and make it do something else, like fail with a CHECK, or to
+  // leave this constructor out entirely?
+  //
+  // template <typename U>
+  // shared_ptr(const weak_ptr<U>& ptr);
+
+  ~shared_ptr() {
+    if (ptr_ != NULL) {
+      if (!RefCountDec(&control_block_->refcount_)) {
+        delete ptr_;
+
+        // weak_count_ is defined as the number of weak_ptrs that observe
+        // ptr_, plus 1 if refcount_ is nonzero.
+        if (!RefCountDec(&control_block_->weak_count_)) {
+          delete control_block_;
+        }
+      }
+    }
+  }
+
+  // Replaces underlying raw pointer with the one passed in.  The reference
+  // count is set to one (or zero if the pointer is NULL) for the pointer
+  // being passed in and decremented for the one being replaced.
+  //
+  // If you have a compilation error with this code, make sure you aren't
+  // passing NULL, nullptr, or 0 to this function.  Call reset without an
+  // argument to reset to a null ptr.
+  template <typename Y>
+  void reset(Y* p) {
+    if (p != ptr_) {
+      shared_ptr<T> tmp(p);
+      tmp.swap(*this);
+    }
+  }
+
+  void reset() {
+    reset(static_cast<T*>(NULL));
+  }
+
+  // Exchanges the contents of this with the contents of r.  This function
+  // supports more efficient swapping since it eliminates the need for a
+  // temporary shared_ptr object.
+  void swap(shared_ptr<T>& r) {
+    using std::swap;  // http://go/using-std-swap
+    swap(ptr_, r.ptr_);
+    swap(control_block_, r.control_block_);
+  }
+
+  // The following function is useful for gaining access to the underlying
+  // pointer when a shared_ptr remains in scope so the reference-count is
+  // known to be > 0 (e.g. for parameter passing).
+  T* get() const {
+    return ptr_;
+  }
+
+  T& operator*() const {
+    return *ptr_;
+  }
+
+  T* operator->() const {
+    return ptr_;
+  }
+
+  long use_count() const {
+    return control_block_ ? control_block_->refcount_ : 1;
+  }
+
+  bool unique() const {
+    return use_count() == 1;
+  }
+
+ private:
+  // If r is non-empty, initialize *this to share ownership with r,
+  // increasing the underlying reference count.
+  // If r is empty, *this remains empty.
+  // Requires: this is empty, namely this->ptr_ == NULL.
+  template <typename U>
+  void Initialize(const shared_ptr<U>& r) {
+    // This performs a static_cast on r.ptr_ to U*, which is a no-op since it
+    // is already a U*. So initialization here requires that r.ptr_ is
+    // implicitly convertible to T*.
+    InitializeWithStaticCast<U>(r);
+  }
+
+  // Initializes *this as described in Initialize, but additionally performs a
+  // static_cast from r.ptr_ (V*) to U*.
+  // NOTE(gfc): We'd need a more general form to support const_pointer_cast and
+  // dynamic_pointer_cast, but those operations are sufficiently discouraged
+  // that supporting static_pointer_cast is sufficient.
+  template <typename U, typename V>
+  void InitializeWithStaticCast(const shared_ptr<V>& r) {
+    if (r.control_block_ != NULL) {
+      RefCountInc(&r.control_block_->refcount_);
+
+      ptr_ = static_cast<U*>(r.ptr_);
+      control_block_ = r.control_block_;
+    }
+  }
+
+  // Helper function for the constructor that takes a raw pointer. If T
+  // doesn't inherit from enable_shared_from_this<T> then we have nothing to
+  // do, so this function is trivial and inline. The other version is declared
+  // out of line, after the class definition of enable_shared_from_this.
+  void MaybeSetupWeakThis(enable_shared_from_this<T>* ptr);
+  void MaybeSetupWeakThis(...) { }
+
+  T* ptr_;
+  SharedPtrControlBlock* control_block_;
+
+#ifndef SWIG
+  template <typename U>
+  friend class shared_ptr;
+
+  template <typename U, typename V>
+  friend shared_ptr<U> static_pointer_cast(const shared_ptr<V>& rhs);
+#endif
+};
+
+// Matches the interface of std::swap as an aid to generic programming.
+template <typename T> void swap(shared_ptr<T>& r, shared_ptr<T>& s) {
+  r.swap(s);
+}
+
+template <typename T, typename U>
+shared_ptr<T> static_pointer_cast(const shared_ptr<U>& rhs) {
+  shared_ptr<T> lhs;
+  lhs.template InitializeWithStaticCast<T>(rhs);
+  return lhs;
+}
+
+// See comments at the top of the file for a description of why this
+// class exists, and the draft C++ standard (as of July 2009 the
+// latest draft is N2914) for the detailed specification.
+template <typename T>
+class weak_ptr {
+  template <typename U> friend class weak_ptr;
+ public:
+  typedef T element_type;
+
+  // Create an empty (i.e. already expired) weak_ptr.
+  weak_ptr() : ptr_(NULL), control_block_(NULL) { }
+
+  // Create a weak_ptr that observes the same object that ptr points
+  // to.  Note that there is no race condition here: we know that the
+  // control block can't disappear while we're looking at it because
+  // it is owned by at least one shared_ptr, ptr.
+  template <typename U> weak_ptr(const shared_ptr<U>& ptr) {
+    CopyFrom(ptr.ptr_, ptr.control_block_);
+  }
+
+  // Copy a weak_ptr. The object it points to might disappear, but we
+  // don't care: we're only working with the control block, and it can't
+  // disappear while we're looking at it because it's owned by at least one
+  // weak_ptr, ptr.
+  template <typename U> weak_ptr(const weak_ptr<U>& ptr) {
+    CopyFrom(ptr.ptr_, ptr.control_block_);
+  }
+
+  // Need non-templated version to prevent default copy constructor
+  weak_ptr(const weak_ptr& ptr) {
+    CopyFrom(ptr.ptr_, ptr.control_block_);
+  }
+
+  // Destroy the weak_ptr. If no shared_ptr owns the control block, and if
+  // we are the last weak_ptr to own it, then it can be deleted. Note that
+  // weak_count_ is defined as the number of weak_ptrs sharing this control
+  // block, plus 1 if there are any shared_ptrs. We therefore know that it's
+  // safe to delete the control block when weak_count_ reaches 0, without
+  // having to perform any additional tests.
+  ~weak_ptr() {
+    if (control_block_ != NULL &&
+        !RefCountDec(&control_block_->weak_count_)) {
+      delete control_block_;
+    }
+  }
+
+  weak_ptr& operator=(const weak_ptr& ptr) {
+    if (&ptr != this) {
+      weak_ptr tmp(ptr);
+      tmp.swap(*this);
+    }
+    return *this;
+  }
+  template <typename U> weak_ptr& operator=(const weak_ptr<U>& ptr) {
+    weak_ptr tmp(ptr);
+    tmp.swap(*this);
+    return *this;
+  }
+  template <typename U> weak_ptr& operator=(const shared_ptr<U>& ptr) {
+    weak_ptr tmp(ptr);
+    tmp.swap(*this);
+    return *this;
+  }
+
+  void swap(weak_ptr& ptr) {
+    using std::swap;  // http://go/using-std-swap
+    swap(ptr_, ptr.ptr_);
+    swap(control_block_, ptr.control_block_);
+  }
+
+  void reset() {
+    weak_ptr tmp;
+    tmp.swap(*this);
+  }
+
+  // Return the number of shared_ptrs that own the object we are observing.
+  // Note that this number can be 0 (if this pointer has expired).
+  long use_count() const {
+    return control_block_ != NULL ? control_block_->refcount_ : 0;
+  }
+
+  bool expired() const { return use_count() == 0; }
+
+  // Return a shared_ptr that owns the object we are observing. If we
+  // have expired, the shared_ptr will be empty. We have to be careful
+  // about concurrency, though, since some other thread might be
+  // destroying the last owning shared_ptr while we're in this
+  // function.  We want to increment the refcount only if it's nonzero
+  // and get the new value, and we want that whole operation to be
+  // atomic.
+  shared_ptr<T> lock() const {
+    shared_ptr<T> result;
+    if (control_block_ != NULL) {
+      Atomic32 old_refcount;
+      do {
+        old_refcount = control_block_->refcount_;
+        if (old_refcount == 0)
+          break;
+      } while (old_refcount !=
+               NoBarrier_CompareAndSwap(
+                   &control_block_->refcount_, old_refcount,
+                   old_refcount + 1));
+      if (old_refcount > 0) {
+        result.ptr_ = ptr_;
+        result.control_block_ = control_block_;
+      }
+    }
+
+    return result;
+  }
+
+ private:
+  void CopyFrom(T* ptr, SharedPtrControlBlock* control_block) {
+    ptr_ = ptr;
+    control_block_ = control_block;
+    if (control_block_ != NULL)
+      RefCountInc(&control_block_->weak_count_);
+  }
+
+ private:
+  element_type* ptr_;
+  SharedPtrControlBlock* control_block_;
+};
+
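+// Illustrative use of weak_ptr::lock() (Entry, some_shared_entry and Use()
+// are hypothetical): a weak_ptr observes an object without keeping it alive,
+// and lock() safely promotes the observation back to shared ownership:
+//
+//   weak_ptr<Entry> observer = some_shared_entry;  // does not extend lifetime
+//   ...
+//   shared_ptr<Entry> strong = observer.lock();
+//   if (strong.get() != NULL) {
+//     strong->Use();  // safe: we share ownership while strong is in scope
+//   }                 // otherwise the object has already been destroyed
+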
+template <typename T> void swap(weak_ptr<T>& r, weak_ptr<T>& s) {
+  r.swap(s);
+}
+
+// See comments at the top of the file for a description of why this class
+// exists, and section 20.8.10.5 of the draft C++ standard (as of July 2009
+// the latest draft is N2914) for the detailed specification.
+template <typename T>
+class enable_shared_from_this {
+  friend class shared_ptr<T>;
+ public:
+  // Precondition: there must be a shared_ptr that owns *this and that was
+  // created, directly or indirectly, from a raw pointer of type T*. (The
+  // latter part of the condition is technical but not quite redundant; it
+  // rules out some complicated uses involving inheritance hierarchies.)
+  shared_ptr<T> shared_from_this() {
+    // Behavior is undefined if the precondition isn't satisfied; we choose
+    // to die with a CHECK failure.
+    CHECK(!weak_this_.expired()) << "No shared_ptr owns this object";
+    return weak_this_.lock();
+  }
+  shared_ptr<const T> shared_from_this() const {
+    CHECK(!weak_this_.expired()) << "No shared_ptr owns this object";
+    return weak_this_.lock();
+  }
+
+ protected:
+  enable_shared_from_this() { }
+  enable_shared_from_this(const enable_shared_from_this& other) { }
+  enable_shared_from_this& operator=(const enable_shared_from_this& other) {
+    return *this;
+  }
+  ~enable_shared_from_this() { }
+
+ private:
+  weak_ptr<T> weak_this_;
+};
+
+// This is a helper function called by shared_ptr's constructor from a raw
+// pointer. If T inherits from enable_shared_from_this<T>, it sets up
+// weak_this_ so that shared_from_this works correctly. If T does not inherit
+// from enable_shared_from_this<T>, the other overload, defined inline in the
+// class, is selected instead and does nothing.
+template<typename T>
+void shared_ptr<T>::MaybeSetupWeakThis(enable_shared_from_this<T>* ptr) {
+  if (ptr) {
+    CHECK(ptr->weak_this_.expired()) << "Object already owned by a shared_ptr";
+    ptr->weak_this_ = *this;
+  }
+}
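+
+// Illustrative usage of enable_shared_from_this (Widget is a hypothetical
+// type). The object must already be owned by a shared_ptr created from a
+// raw Widget*:
+//
+//   class Widget : public enable_shared_from_this<Widget> {
+//    public:
+//     shared_ptr<Widget> self() { return shared_from_this(); }
+//   };
+//
+//   shared_ptr<Widget> w(new Widget);   // constructor sets up weak_this_
+//   shared_ptr<Widget> w2 = w->self();  // shares ownership with w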
+
+#endif  // UTIL_GTL_USE_STD_SHARED_PTR
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__
diff --git a/src/google/protobuf/stubs/singleton.h b/src/google/protobuf/stubs/singleton.h
new file mode 100644
index 0000000..9301f54
--- /dev/null
+++ b/src/google/protobuf/stubs/singleton.h
@@ -0,0 +1,68 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef GOOGLE_PROTOBUF_STUBS_SINGLETON_H__
+#define GOOGLE_PROTOBUF_STUBS_SINGLETON_H__
+
+#include <google/protobuf/stubs/atomicops.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/once.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template<typename T>
+class Singleton {
+ public:
+  static T* get() {
+    GoogleOnceInit(&once_, &Singleton<T>::Init);
+    return instance_;
+  }
+  static void ShutDown() {
+    delete instance_;
+    instance_ = NULL;
+  }
+ private:
+  static void Init() {
+    instance_ = new T();
+  }
+  static ProtobufOnceType once_;
+  static T* instance_;
+};
+
+template<typename T>
+ProtobufOnceType Singleton<T>::once_;
+
+template<typename T>
+T* Singleton<T>::instance_ = NULL;
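+
+// Illustrative usage (MyRegistry is a hypothetical default-constructible
+// type). The first call to get() constructs the instance exactly once, even
+// if several threads call it concurrently:
+//
+//   MyRegistry* registry = Singleton<MyRegistry>::get();
+//   registry->DoSomething();
+//
+// ShutDown() deletes the instance; subsequent calls to get() return NULL.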
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_SINGLETON_H__
diff --git a/src/google/protobuf/stubs/status.cc b/src/google/protobuf/stubs/status.cc
new file mode 100644
index 0000000..dd1bd61
--- /dev/null
+++ b/src/google/protobuf/stubs/status.cc
@@ -0,0 +1,134 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <google/protobuf/stubs/status.h>
+
+#include <ostream>
+#include <stdio.h>
+#include <string>
+#include <utility>
+
+namespace google {
+namespace protobuf {
+namespace util {
+namespace error {
+inline string CodeEnumToString(error::Code code) {
+  switch (code) {
+    case OK:
+      return "OK";
+    case CANCELLED:
+      return "CANCELLED";
+    case UNKNOWN:
+      return "UNKNOWN";
+    case INVALID_ARGUMENT:
+      return "INVALID_ARGUMENT";
+    case DEADLINE_EXCEEDED:
+      return "DEADLINE_EXCEEDED";
+    case NOT_FOUND:
+      return "NOT_FOUND";
+    case ALREADY_EXISTS:
+      return "ALREADY_EXISTS";
+    case PERMISSION_DENIED:
+      return "PERMISSION_DENIED";
+    case UNAUTHENTICATED:
+      return "UNAUTHENTICATED";
+    case RESOURCE_EXHAUSTED:
+      return "RESOURCE_EXHAUSTED";
+    case FAILED_PRECONDITION:
+      return "FAILED_PRECONDITION";
+    case ABORTED:
+      return "ABORTED";
+    case OUT_OF_RANGE:
+      return "OUT_OF_RANGE";
+    case UNIMPLEMENTED:
+      return "UNIMPLEMENTED";
+    case INTERNAL:
+      return "INTERNAL";
+    case UNAVAILABLE:
+      return "UNAVAILABLE";
+    case DATA_LOSS:
+      return "DATA_LOSS";
+  }
+
+  // No default clause: the compiler (e.g. clang with -Wswitch) will warn if
+  // a code is missing from the switch above.
+  return "UNKNOWN";
+}
+}  // namespace error
+
+const Status Status::OK = Status();
+const Status Status::CANCELLED = Status(error::CANCELLED, "");
+const Status Status::UNKNOWN = Status(error::UNKNOWN, "");
+
+Status::Status() : error_code_(error::OK) {
+}
+
+Status::Status(error::Code error_code, StringPiece error_message)
+    : error_code_(error_code) {
+  if (error_code != error::OK) {
+    error_message_ = error_message.ToString();
+  }
+}
+
+Status::Status(const Status& other)
+    : error_code_(other.error_code_), error_message_(other.error_message_) {
+}
+
+Status& Status::operator=(const Status& other) {
+  error_code_ = other.error_code_;
+  error_message_ = other.error_message_;
+  return *this;
+}
+
+bool Status::operator==(const Status& x) const {
+  return error_code_ == x.error_code_ &&
+      error_message_ == x.error_message_;
+}
+
+string Status::ToString() const {
+  if (error_code_ == error::OK) {
+    return "OK";
+  } else {
+    if (error_message_.empty()) {
+      return error::CodeEnumToString(error_code_);
+    } else {
+      return error::CodeEnumToString(error_code_) + ":" +
+          error_message_;
+    }
+  }
+}
+
+ostream& operator<<(ostream& os, const Status& x) {
+  os << x.ToString();
+  return os;
+}
+
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/status.h b/src/google/protobuf/stubs/status.h
new file mode 100644
index 0000000..614ab99
--- /dev/null
+++ b/src/google/protobuf/stubs/status.h
@@ -0,0 +1,116 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef GOOGLE_PROTOBUF_STUBS_STATUS_H_
+#define GOOGLE_PROTOBUF_STUBS_STATUS_H_
+
+#include <iosfwd>
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/stringpiece.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+namespace error {
+// These values must match error codes defined in google/rpc/code.proto.
+enum Code {
+  OK = 0,
+  CANCELLED = 1,
+  UNKNOWN = 2,
+  INVALID_ARGUMENT = 3,
+  DEADLINE_EXCEEDED = 4,
+  NOT_FOUND = 5,
+  ALREADY_EXISTS = 6,
+  PERMISSION_DENIED = 7,
+  UNAUTHENTICATED = 16,
+  RESOURCE_EXHAUSTED = 8,
+  FAILED_PRECONDITION = 9,
+  ABORTED = 10,
+  OUT_OF_RANGE = 11,
+  UNIMPLEMENTED = 12,
+  INTERNAL = 13,
+  UNAVAILABLE = 14,
+  DATA_LOSS = 15,
+};
+}  // namespace error
+
+class LIBPROTOBUF_EXPORT Status {
+ public:
+  // Creates a "successful" status.
+  Status();
+
+  // Create a status in the canonical error space with the specified
+  // code, and error message.  If "code == 0", error_message is
+  // ignored and a Status object identical to Status::OK is
+  // constructed.
+  Status(error::Code error_code, StringPiece error_message);
+  Status(const Status&);
+  Status& operator=(const Status& x);
+  ~Status() {}
+
+  // Some pre-defined Status objects
+  static const Status OK;             // Identical to 0-arg constructor
+  static const Status CANCELLED;
+  static const Status UNKNOWN;
+
+  // Accessors
+  bool ok() const {
+    return error_code_ == error::OK;
+  }
+  int error_code() const {
+    return error_code_;
+  }
+  StringPiece error_message() const {
+    return error_message_;
+  }
+
+  bool operator==(const Status& x) const;
+  bool operator!=(const Status& x) const {
+    return !operator==(x);
+  }
+
+  // Return a combination of the error code name and message.
+  string ToString() const;
+
+ private:
+  error::Code error_code_;
+  string error_message_;
+};
+
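+// Illustrative usage (DoWork is a hypothetical function returning a Status):
+//
+//   util::Status s = DoWork();
+//   if (!s.ok()) {
+//     GOOGLE_LOG(ERROR) << "DoWork failed: " << s.ToString();
+//     return s;
+//   }
+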
+// Prints a human-readable representation of 'x' to 'os'.
+LIBPROTOBUF_EXPORT ostream& operator<<(ostream& os, const Status& x);
+
+#define EXPECT_OK(value) EXPECT_TRUE((value).ok())
+
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_STUBS_STATUS_H_
diff --git a/src/google/protobuf/stubs/status_macros.h b/src/google/protobuf/stubs/status_macros.h
new file mode 100644
index 0000000..743e79a
--- /dev/null
+++ b/src/google/protobuf/stubs/status_macros.h
@@ -0,0 +1,89 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// From: util/task/contrib/status_macros/status_macros.h
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STATUS_MACROS_H_
+#define GOOGLE_PROTOBUF_STUBS_STATUS_MACROS_H_
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/status.h>
+#include <google/protobuf/stubs/statusor.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+
+// Run a command that returns a util::Status.  If the called code returns an
+// error status, return that status up out of this method too.
+//
+// Example:
+//   RETURN_IF_ERROR(DoThings(4));
+#define RETURN_IF_ERROR(expr) \
+  do { \
+    /* Using _status below to avoid capture problems if expr is "status". */ \
+    const ::google::protobuf::util::Status _status = (expr); \
+    if (GOOGLE_PREDICT_FALSE(!_status.ok())) return _status; \
+  } while (0)
+
+// Internal helper for concatenating macro values.
+#define STATUS_MACROS_CONCAT_NAME_INNER(x, y) x##y
+#define STATUS_MACROS_CONCAT_NAME(x, y) STATUS_MACROS_CONCAT_NAME_INNER(x, y)
+
+template<typename T>
+Status DoAssignOrReturn(T& lhs, StatusOr<T> result) {
+  if (result.ok()) {
+    lhs = result.ValueOrDie();
+  }
+  return result.status();
+}
+
+#define ASSIGN_OR_RETURN_IMPL(status, lhs, rexpr) \
+  Status status = DoAssignOrReturn(lhs, (rexpr)); \
+  if (GOOGLE_PREDICT_FALSE(!status.ok())) return status;
+
+// Executes an expression that returns a util::StatusOr, extracting its value
+// into the variable defined by lhs (or returning on error).
+//
+// Example: Assigning to an existing value
+//   ValueType value;
+//   ASSIGN_OR_RETURN(value, MaybeGetValue(arg));
+//
+// WARNING: ASSIGN_OR_RETURN expands into multiple statements; it cannot be used
+//  in a single statement (e.g. as the body of an if statement without {})!
+#define ASSIGN_OR_RETURN(lhs, rexpr) \
+  ASSIGN_OR_RETURN_IMPL( \
+      STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr);
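+
+// For example, the following is NOT valid, because the macro expands into
+// multiple statements and only part of the expansion would be attached to the
+// if (illustrative sketch, reusing MaybeGetValue from the example above):
+//
+//   if (condition)
+//     ASSIGN_OR_RETURN(value, MaybeGetValue(arg));   // WRONG: needs braces
+//
+//   if (condition) {                                 // OK
+//     ASSIGN_OR_RETURN(value, MaybeGetValue(arg));
+//   }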
+
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STATUS_MACROS_H_
diff --git a/src/google/protobuf/stubs/status_test.cc b/src/google/protobuf/stubs/status_test.cc
new file mode 100644
index 0000000..c70c33c
--- /dev/null
+++ b/src/google/protobuf/stubs/status_test.cc
@@ -0,0 +1,131 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <google/protobuf/stubs/status.h>
+
+#include <stdio.h>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace {
+TEST(Status, Empty) {
+  util::Status status;
+  EXPECT_EQ(util::error::OK, status.error_code());
+  EXPECT_EQ("OK", status.ToString());
+  EXPECT_EQ(util::error::OK, util::Status::OK.error_code());
+  EXPECT_EQ("OK", util::Status::OK.ToString());
+}
+
+TEST(Status, GenericCodes) {
+  EXPECT_EQ(util::error::OK, util::Status::OK.error_code());
+  EXPECT_EQ(util::error::CANCELLED, util::Status::CANCELLED.error_code());
+  EXPECT_EQ(util::error::UNKNOWN, util::Status::UNKNOWN.error_code());
+}
+
+TEST(Status, ConstructorZero) {
+  util::Status status(util::error::OK, "msg");
+  EXPECT_TRUE(status.ok());
+  EXPECT_EQ("OK", status.ToString());
+}
+
+TEST(Status, CheckOK) {
+  util::Status status;
+  GOOGLE_CHECK_OK(status);
+  GOOGLE_CHECK_OK(status) << "Failed";
+  GOOGLE_DCHECK_OK(status) << "Failed";
+}
+
+TEST(Status, ErrorMessage) {
+  util::Status status(util::error::INVALID_ARGUMENT, "");
+  EXPECT_FALSE(status.ok());
+  EXPECT_EQ("", status.error_message().ToString());
+  EXPECT_EQ("INVALID_ARGUMENT", status.ToString());
+  status = util::Status(util::error::INVALID_ARGUMENT, "msg");
+  EXPECT_FALSE(status.ok());
+  EXPECT_EQ("msg", status.error_message().ToString());
+  EXPECT_EQ("INVALID_ARGUMENT:msg", status.ToString());
+  status = util::Status(util::error::OK, "msg");
+  EXPECT_TRUE(status.ok());
+  EXPECT_EQ("", status.error_message().ToString());
+  EXPECT_EQ("OK", status.ToString());
+}
+
+TEST(Status, Copy) {
+  util::Status a(util::error::UNKNOWN, "message");
+  util::Status b(a);
+  ASSERT_EQ(a.ToString(), b.ToString());
+}
+
+TEST(Status, Assign) {
+  util::Status a(util::error::UNKNOWN, "message");
+  util::Status b;
+  b = a;
+  ASSERT_EQ(a.ToString(), b.ToString());
+}
+
+TEST(Status, AssignEmpty) {
+  util::Status a(util::error::UNKNOWN, "message");
+  util::Status b;
+  a = b;
+  ASSERT_EQ(string("OK"), a.ToString());
+  ASSERT_TRUE(b.ok());
+  ASSERT_TRUE(a.ok());
+}
+
+TEST(Status, EqualsOK) {
+  ASSERT_EQ(util::Status::OK, util::Status());
+}
+
+TEST(Status, EqualsSame) {
+  const util::Status a = util::Status(util::error::CANCELLED, "message");
+  const util::Status b = util::Status(util::error::CANCELLED, "message");
+  ASSERT_EQ(a, b);
+}
+
+TEST(Status, EqualsCopy) {
+  const util::Status a = util::Status(util::error::CANCELLED, "message");
+  const util::Status b = a;
+  ASSERT_EQ(a, b);
+}
+
+TEST(Status, EqualsDifferentCode) {
+  const util::Status a = util::Status(util::error::CANCELLED, "message");
+  const util::Status b = util::Status(util::error::UNKNOWN, "message");
+  ASSERT_NE(a, b);
+}
+
+TEST(Status, EqualsDifferentMessage) {
+  const util::Status a = util::Status(util::error::CANCELLED, "message");
+  const util::Status b = util::Status(util::error::CANCELLED, "another");
+  ASSERT_NE(a, b);
+}
+}  // namespace
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/statusor.cc b/src/google/protobuf/stubs/statusor.cc
new file mode 100644
index 0000000..48d1402
--- /dev/null
+++ b/src/google/protobuf/stubs/statusor.cc
@@ -0,0 +1,46 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/statusor.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+namespace internal {
+
+void StatusOrHelper::Crash(const Status& status) {
+  GOOGLE_LOG(FATAL) << "Attempting to fetch value instead of handling error "
+                    << status.ToString();
+}
+
+}  // namespace internal
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/statusor.h b/src/google/protobuf/stubs/statusor.h
new file mode 100644
index 0000000..ad84870
--- /dev/null
+++ b/src/google/protobuf/stubs/statusor.h
@@ -0,0 +1,259 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// StatusOr<T> is the union of a Status object and a T
+// object. StatusOr models the concept of an object that is either a
+// usable value, or an error Status explaining why such a value is
+// not present. To this end, StatusOr<T> does not allow its Status
+// value to be Status::OK. Further, StatusOr<T*> does not allow the
+// contained pointer to be NULL.
+//
+// The primary use-case for StatusOr<T> is as the return value of a
+// function which may fail.
+//
+// Example client usage for a StatusOr<T>, where T is not a pointer:
+//
+//  StatusOr<float> result = DoBigCalculationThatCouldFail();
+//  if (result.ok()) {
+//    float answer = result.ValueOrDie();
+//    printf("Big calculation yielded: %f", answer);
+//  } else {
+//    LOG(ERROR) << result.status();
+//  }
+//
+// Example client usage for a StatusOr<T*>:
+//
+//  StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg);
+//  if (result.ok()) {
+//    std::unique_ptr<Foo> foo(result.ValueOrDie());
+//    foo->DoSomethingCool();
+//  } else {
+//    LOG(ERROR) << result.status();
+//  }
+//
+// Example client usage for a StatusOr<std::unique_ptr<T>>:
+//
+//  StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
+//  if (result.ok()) {
+//    std::unique_ptr<Foo> foo = result.ConsumeValueOrDie();
+//    foo->DoSomethingCool();
+//  } else {
+//    LOG(ERROR) << result.status();
+//  }
+//
+// Example factory implementation returning StatusOr<T*>:
+//
+//  StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) {
+//    if (arg <= 0) {
+//      return ::util::Status(::util::error::INVALID_ARGUMENT,
+//                            "Arg must be positive");
+//    } else {
+//      return new Foo(arg);
+//    }
+//  }
+//
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STATUSOR_H_
+#define GOOGLE_PROTOBUF_STUBS_STATUSOR_H_
+
+#include <new>
+#include <string>
+#include <utility>
+
+#include <google/protobuf/stubs/status.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+
+template<typename T>
+class StatusOr {
+  template<typename U> friend class StatusOr;
+
+ public:
+  // Construct a new StatusOr with Status::UNKNOWN status
+  StatusOr();
+
+  // Construct a new StatusOr with the given non-ok status. After calling
+  // this constructor, calls to ValueOrDie() will CHECK-fail.
+  //
+  // NOTE: Not explicit - we want to use StatusOr<T> as a return
+  // value, so it is convenient and sensible to be able to do 'return
+  // Status()' when the return type is StatusOr<T>.
+  //
+  // REQUIRES: status != Status::OK. In this implementation, passing
+  // Status::OK here is turned into an error status with code
+  // error::INTERNAL instead.
+  StatusOr(const Status& status);  // NOLINT
+
+  // Construct a new StatusOr with the given value. If T is a plain pointer,
+  // value must not be NULL. After calling this constructor, calls to
+  // ValueOrDie() will succeed, and calls to status() will return OK.
+  //
+  // NOTE: Not explicit - we want to use StatusOr<T> as a return type
+  // so it is convenient and sensible to be able to do 'return T()'
+  // when the return type is StatusOr<T>.
+  //
+  // REQUIRES: if T is a plain pointer, value != NULL. In this
+  // implementation, passing a NULL pointer here is turned into an error
+  // status with code error::INTERNAL instead.
+  StatusOr(const T& value);  // NOLINT
+
+  // Copy constructor.
+  StatusOr(const StatusOr& other);
+
+  // Conversion copy constructor, T must be copy constructible from U
+  template<typename U>
+  StatusOr(const StatusOr<U>& other);
+
+  // Assignment operator.
+  StatusOr& operator=(const StatusOr& other);
+
+  // Conversion assignment operator, T must be assignable from U
+  template<typename U>
+  StatusOr& operator=(const StatusOr<U>& other);
+
+  // Returns a reference to our status. If this contains a T, then
+  // returns Status::OK.
+  const Status& status() const;
+
+  // Returns this->status().ok()
+  bool ok() const;
+
+  // Returns a reference to our current value, or CHECK-fails if !this->ok().
+  // (ConsumeValueOrDie(), mentioned in the examples above, is not provided
+  // by this implementation.)
+  const T& ValueOrDie() const;
+
+ private:
+  Status status_;
+  T value_;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation details for StatusOr<T>
+
+namespace internal {
+
+class LIBPROTOBUF_EXPORT StatusOrHelper {
+ public:
+  // Move type-agnostic error handling to the .cc.
+  static void Crash(const util::Status& status);
+
+  // Customized behavior for StatusOr<T> vs. StatusOr<T*>
+  template<typename T>
+  struct Specialize;
+};
+
+template<typename T>
+struct StatusOrHelper::Specialize {
+  // For non-pointer T, a reference can never be NULL.
+  static inline bool IsValueNull(const T& t) { return false; }
+};
+
+template<typename T>
+struct StatusOrHelper::Specialize<T*> {
+  static inline bool IsValueNull(const T* t) { return t == NULL; }
+};
+
+}  // namespace internal
+
+template<typename T>
+inline StatusOr<T>::StatusOr()
+    : status_(util::Status::UNKNOWN) {
+}
+
+template<typename T>
+inline StatusOr<T>::StatusOr(const Status& status) {
+  if (status.ok()) {
+    status_ = Status(error::INTERNAL, "Status::OK is not a valid argument.");
+  } else {
+    status_ = status;
+  }
+}
+
+template<typename T>
+inline StatusOr<T>::StatusOr(const T& value) {
+  if (internal::StatusOrHelper::Specialize<T>::IsValueNull(value)) {
+    status_ = Status(error::INTERNAL, "NULL is not a valid argument.");
+  } else {
+    status_ = Status::OK;
+    value_ = value;
+  }
+}
+
+template<typename T>
+inline StatusOr<T>::StatusOr(const StatusOr<T>& other)
+    : status_(other.status_), value_(other.value_) {
+}
+
+template<typename T>
+inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<T>& other) {
+  status_ = other.status_;
+  value_ = other.value_;
+  return *this;
+}
+
+template<typename T>
+template<typename U>
+inline StatusOr<T>::StatusOr(const StatusOr<U>& other)
+    : status_(other.status_), value_(other.status_.ok() ? other.value_ : NULL) {
+}
+
+template<typename T>
+template<typename U>
+inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<U>& other) {
+  status_ = other.status_;
+  if (status_.ok()) value_ = other.value_;
+  return *this;
+}
+
+template<typename T>
+inline const Status& StatusOr<T>::status() const {
+  return status_;
+}
+
+template<typename T>
+inline bool StatusOr<T>::ok() const {
+  return status().ok();
+}
+
+template<typename T>
+inline const T& StatusOr<T>::ValueOrDie() const {
+  if (!status_.ok()) {
+    internal::StatusOrHelper::Crash(status_);
+  }
+  return value_;
+}
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STATUSOR_H_
diff --git a/src/google/protobuf/stubs/statusor_test.cc b/src/google/protobuf/stubs/statusor_test.cc
new file mode 100644
index 0000000..6e2a9e5
--- /dev/null
+++ b/src/google/protobuf/stubs/statusor_test.cc
@@ -0,0 +1,274 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/statusor.h>
+
+#include <errno.h>
+#include <memory>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+namespace {
+
+class Base1 {
+ public:
+  virtual ~Base1() {}
+  int pad;
+};
+
+class Base2 {
+ public:
+  virtual ~Base2() {}
+  int yetotherpad;
+};
+
+class Derived : public Base1, public Base2 {
+ public:
+  virtual ~Derived() {}
+  int evenmorepad;
+};
+
+class CopyNoAssign {
+ public:
+  explicit CopyNoAssign(int value) : foo(value) {}
+  CopyNoAssign(const CopyNoAssign& other) : foo(other.foo) {}
+  int foo;
+ private:
+  const CopyNoAssign& operator=(const CopyNoAssign&);
+};
+
+TEST(StatusOr, TestDefaultCtor) {
+  StatusOr<int> thing;
+  EXPECT_FALSE(thing.ok());
+  EXPECT_EQ(Status::UNKNOWN, thing.status());
+}
+
+TEST(StatusOr, TestStatusCtor) {
+  StatusOr<int> thing(Status::CANCELLED);
+  EXPECT_FALSE(thing.ok());
+  EXPECT_EQ(Status::CANCELLED, thing.status());
+}
+
+TEST(StatusOr, TestValueCtor) {
+  const int kI = 4;
+  StatusOr<int> thing(kI);
+  EXPECT_TRUE(thing.ok());
+  EXPECT_EQ(kI, thing.ValueOrDie());
+}
+
+TEST(StatusOr, TestCopyCtorStatusOk) {
+  const int kI = 4;
+  StatusOr<int> original(kI);
+  StatusOr<int> copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+  EXPECT_EQ(original.ValueOrDie(), copy.ValueOrDie());
+}
+
+TEST(StatusOr, TestCopyCtorStatusNotOk) {
+  StatusOr<int> original(Status::CANCELLED);
+  StatusOr<int> copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+}
+
+TEST(StatusOr, TestCopyCtorStatusOKConverting) {
+  const int kI = 4;
+  StatusOr<int>    original(kI);
+  StatusOr<double> copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+  EXPECT_EQ(original.ValueOrDie(), copy.ValueOrDie());
+}
+
+TEST(StatusOr, TestCopyCtorStatusNotOkConverting) {
+  StatusOr<int>    original(Status::CANCELLED);
+  StatusOr<double> copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+}
+
+TEST(StatusOr, TestAssignmentStatusOk) {
+  const int kI = 4;
+  StatusOr<int> source(kI);
+  StatusOr<int> target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+  EXPECT_EQ(source.ValueOrDie(), target.ValueOrDie());
+}
+
+TEST(StatusOr, TestAssignmentStatusNotOk) {
+  StatusOr<int> source(Status::CANCELLED);
+  StatusOr<int> target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+}
+
+TEST(StatusOr, TestAssignmentStatusOKConverting) {
+  const int kI = 4;
+  StatusOr<int>    source(kI);
+  StatusOr<double> target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+  EXPECT_DOUBLE_EQ(source.ValueOrDie(), target.ValueOrDie());
+}
+
+TEST(StatusOr, TestAssignmentStatusNotOkConverting) {
+  StatusOr<int>    source(Status::CANCELLED);
+  StatusOr<double> target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+}
+
+TEST(StatusOr, TestStatus) {
+  StatusOr<int> good(4);
+  EXPECT_TRUE(good.ok());
+  StatusOr<int> bad(Status::CANCELLED);
+  EXPECT_FALSE(bad.ok());
+  EXPECT_EQ(Status::CANCELLED, bad.status());
+}
+
+TEST(StatusOr, TestValue) {
+  const int kI = 4;
+  StatusOr<int> thing(kI);
+  EXPECT_EQ(kI, thing.ValueOrDie());
+}
+
+TEST(StatusOr, TestValueConst) {
+  const int kI = 4;
+  const StatusOr<int> thing(kI);
+  EXPECT_EQ(kI, thing.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerDefaultCtor) {
+  StatusOr<int*> thing;
+  EXPECT_FALSE(thing.ok());
+  EXPECT_EQ(Status::UNKNOWN, thing.status());
+}
+
+TEST(StatusOr, TestPointerStatusCtor) {
+  StatusOr<int*> thing(Status::CANCELLED);
+  EXPECT_FALSE(thing.ok());
+  EXPECT_EQ(Status::CANCELLED, thing.status());
+}
+
+TEST(StatusOr, TestPointerValueCtor) {
+  const int kI = 4;
+  StatusOr<const int*> thing(&kI);
+  EXPECT_TRUE(thing.ok());
+  EXPECT_EQ(&kI, thing.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerCopyCtorStatusOk) {
+  const int kI = 0;
+  StatusOr<const int*> original(&kI);
+  StatusOr<const int*> copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+  EXPECT_EQ(original.ValueOrDie(), copy.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerCopyCtorStatusNotOk) {
+  StatusOr<int*> original(Status::CANCELLED);
+  StatusOr<int*> copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+}
+
+TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) {
+  Derived derived;
+  StatusOr<Derived*> original(&derived);
+  StatusOr<Base2*>   copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+  EXPECT_EQ(static_cast<const Base2*>(original.ValueOrDie()),
+            copy.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) {
+  StatusOr<Derived*> original(Status::CANCELLED);
+  StatusOr<Base2*>   copy(original);
+  EXPECT_EQ(original.status(), copy.status());
+}
+
+TEST(StatusOr, TestPointerAssignmentStatusOk) {
+  const int kI = 0;
+  StatusOr<const int*> source(&kI);
+  StatusOr<const int*> target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+  EXPECT_EQ(source.ValueOrDie(), target.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerAssignmentStatusNotOk) {
+  StatusOr<int*> source(Status::CANCELLED);
+  StatusOr<int*> target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+}
+
+TEST(StatusOr, TestPointerAssignmentStatusOKConverting) {
+  Derived derived;
+  StatusOr<Derived*> source(&derived);
+  StatusOr<Base2*>   target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+  EXPECT_EQ(static_cast<const Base2*>(source.ValueOrDie()),
+            target.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerAssignmentStatusNotOkConverting) {
+  StatusOr<Derived*> source(Status::CANCELLED);
+  StatusOr<Base2*>   target;
+  target = source;
+  EXPECT_EQ(source.status(), target.status());
+}
+
+TEST(StatusOr, TestPointerStatus) {
+  const int kI = 0;
+  StatusOr<const int*> good(&kI);
+  EXPECT_TRUE(good.ok());
+  StatusOr<const int*> bad(Status::CANCELLED);
+  EXPECT_EQ(Status::CANCELLED, bad.status());
+}
+
+TEST(StatusOr, TestPointerValue) {
+  const int kI = 0;
+  StatusOr<const int*> thing(&kI);
+  EXPECT_EQ(&kI, thing.ValueOrDie());
+}
+
+TEST(StatusOr, TestPointerValueConst) {
+  const int kI = 0;
+  const StatusOr<const int*> thing(&kI);
+  EXPECT_EQ(&kI, thing.ValueOrDie());
+}
+
+}  // namespace
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/stl_util.h b/src/google/protobuf/stubs/stl_util.h
new file mode 100644
index 0000000..9e4c82a
--- /dev/null
+++ b/src/google/protobuf/stubs/stl_util.h
@@ -0,0 +1,121 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/util/gtl/stl_util.h
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__
+#define GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+
+// STLDeleteContainerPointers()
+//  For a range within a container of pointers, calls delete
+//  (non-array version) on these pointers.
+// NOTE: for these three functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
+// advanced, which could result in the hash function trying to dereference a
+// stale pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPointers(ForwardIterator begin,
+                                ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete *temp;
+  }
+}
+
+// Inside Google, this function implements a horrible, disgusting hack in which
+// we reach into the string's private implementation and resize it without
+// initializing the new bytes.  In some cases doing this can significantly
+// improve performance.  However, since it's totally non-portable it has no
+// place in open source code.  Feel free to fill this function in with your
+// own disgusting hack if you want the perf boost.
+inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+  s->resize(new_size);
+}
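+
+// Illustrative usage (fd and n are hypothetical): size the string first, then
+// write directly into its buffer via string_as_array(), declared below:
+//
+//   string buf;
+//   STLStringResizeUninitialized(&buf, n);
+//   ssize_t bytes_read = read(fd, string_as_array(&buf), n);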
+
+// Return a mutable char* pointing to a string's internal buffer,
+// which may not be null-terminated. Writing through this pointer will
+// modify the string.
+//
+// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+// next call to a string method that invalidates iterators.
+//
+// As of 2006-04, there is no standard-blessed way of getting a
+// mutable reference to a string's internal buffer. However, issue 530
+// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#530)
+// proposes this as the method. According to Matt Austern, this should
+// already work on all current implementations.
+inline char* string_as_array(string* str) {
+  // DO NOT USE const_cast<char*>(str->data())! See the unittest for why.
+  return str->empty() ? NULL : &*str->begin();
+}
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container.  This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+//
+// As an alternative to calling STLDeleteElements() directly, consider an
+// RAII wrapper such as google3's ElementDeleter (not included in this stub),
+// which deletes the container's elements when it goes out of scope.
+template <class T>
+void STLDeleteElements(T *container) {
+  if (!container) return;
+  STLDeleteContainerPointers(container->begin(), container->end());
+  container->clear();
+}
+
+// Given an STL container consisting of (key, value) pairs, STLDeleteValues
+// deletes all the "value" components and clears the container.  Does nothing
+// if it is given a NULL pointer.
+template <class T>
+void STLDeleteValues(T *v) {
+  if (!v) return;
+  for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+    delete i->second;
+  }
+  v->clear();
+}
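+
+// Illustrative usage of the two deleters above (not part of the original
+// header; MyMessage is a placeholder type):
+//
+//   std::vector<MyMessage*> owned;
+//   owned.push_back(new MyMessage);
+//   STLDeleteElements(&owned);       // deletes each element, then clears
+//
+//   std::map<string, MyMessage*> by_name;
+//   by_name["a"] = new MyMessage;
+//   STLDeleteValues(&by_name);       // deletes each mapped value, then clears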
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__
diff --git a/src/google/protobuf/stubs/stringpiece.cc b/src/google/protobuf/stubs/stringpiece.cc
new file mode 100644
index 0000000..989474b
--- /dev/null
+++ b/src/google/protobuf/stubs/stringpiece.cc
@@ -0,0 +1,268 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <google/protobuf/stubs/stringpiece.h>
+
+#include <string.h>
+#include <algorithm>
+#include <climits>
+#include <string>
+#include <ostream>
+
+namespace google {
+namespace protobuf {
+std::ostream& operator<<(std::ostream& o, StringPiece piece) {
+  o.write(piece.data(), piece.size());
+  return o;
+}
+
+// Out-of-line error path.
+void StringPiece::LogFatalSizeTooBig(size_t size, const char* details) {
+  GOOGLE_LOG(FATAL) << "size too big: " << size << " details: " << details;
+}
+
+StringPiece::StringPiece(StringPiece x, stringpiece_ssize_type pos)
+    : ptr_(x.ptr_ + pos), length_(x.length_ - pos) {
+  GOOGLE_DCHECK_LE(0, pos);
+  GOOGLE_DCHECK_LE(pos, x.length_);
+}
+
+StringPiece::StringPiece(StringPiece x,
+                         stringpiece_ssize_type pos,
+                         stringpiece_ssize_type len)
+    : ptr_(x.ptr_ + pos), length_(std::min(len, x.length_ - pos)) {
+  GOOGLE_DCHECK_LE(0, pos);
+  GOOGLE_DCHECK_LE(pos, x.length_);
+  GOOGLE_DCHECK_GE(len, 0);
+}
+
+void StringPiece::CopyToString(string* target) const {
+  target->assign(ptr_, length_);
+}
+
+void StringPiece::AppendToString(string* target) const {
+  target->append(ptr_, length_);
+}
+
+bool StringPiece::Consume(StringPiece x) {
+  if (starts_with(x)) {
+    ptr_ += x.length_;
+    length_ -= x.length_;
+    return true;
+  }
+  return false;
+}
+
+bool StringPiece::ConsumeFromEnd(StringPiece x) {
+  if (ends_with(x)) {
+    length_ -= x.length_;
+    return true;
+  }
+  return false;
+}
+
+stringpiece_ssize_type StringPiece::copy(char* buf,
+                                         size_type n,
+                                         size_type pos) const {
+  stringpiece_ssize_type ret = std::min(length_ - pos, n);
+  memcpy(buf, ptr_ + pos, ret);
+  return ret;
+}
+
+bool StringPiece::contains(StringPiece s) const {
+  return find(s, 0) != npos;
+}
+
+stringpiece_ssize_type StringPiece::find(StringPiece s, size_type pos) const {
+  if (length_ <= 0 || pos > static_cast<size_type>(length_)) {
+    if (length_ == 0 && pos == 0 && s.length_ == 0) return 0;
+    return npos;
+  }
+  const char *result = std::search(ptr_ + pos, ptr_ + length_,
+                                   s.ptr_, s.ptr_ + s.length_);
+  return result == ptr_ + length_ ? npos : result - ptr_;
+}
+
+stringpiece_ssize_type StringPiece::find(char c, size_type pos) const {
+  if (length_ <= 0 || pos >= static_cast<size_type>(length_)) {
+    return npos;
+  }
+  const char* result = static_cast<const char*>(
+      memchr(ptr_ + pos, c, length_ - pos));
+  return result != NULL ? result - ptr_ : npos;
+}
+
+stringpiece_ssize_type StringPiece::rfind(StringPiece s, size_type pos) const {
+  if (length_ < s.length_) return npos;
+  const size_t ulen = length_;
+  if (s.length_ == 0) return std::min(ulen, pos);
+
+  const char* last = ptr_ + std::min(ulen - s.length_, pos) + s.length_;
+  const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_);
+  return result != last ? result - ptr_ : npos;
+}
+
+// Search range is [0..pos] inclusive.  If pos == npos, search everything.
+stringpiece_ssize_type StringPiece::rfind(char c, size_type pos) const {
+  // Note: memrchr() is not available on Windows.
+  if (length_ <= 0) return npos;
+  for (stringpiece_ssize_type i =
+      std::min(pos, static_cast<size_type>(length_ - 1));
+       i >= 0; --i) {
+    if (ptr_[i] == c) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+// For each character in characters_wanted, sets the index corresponding
+// to the ASCII code of that character to 1 in table.  This is used by
+// the find_.*_of methods below to tell whether or not a character is in
+// the lookup table in constant time.
+// The argument `table' must be an array that is large enough to hold all
+// the possible values of an unsigned char.  Thus it should be declared
+// as follows:
+//   bool table[UCHAR_MAX + 1]
+static inline void BuildLookupTable(StringPiece characters_wanted,
+                                    bool* table) {
+  const stringpiece_ssize_type length = characters_wanted.length();
+  const char* const data = characters_wanted.data();
+  for (stringpiece_ssize_type i = 0; i < length; ++i) {
+    table[static_cast<unsigned char>(data[i])] = true;
+  }
+}
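+
+// For example (illustrative): given a zero-initialized table, after
+// BuildLookupTable("abc", lookup) the entries lookup['a'], lookup['b'] and
+// lookup['c'] are true and all others remain false, so the find_.*_of loops
+// below test membership in O(1) per character scanned.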
+
+stringpiece_ssize_type StringPiece::find_first_of(StringPiece s,
+                                                  size_type pos) const {
+  if (length_ <= 0 || s.length_ <= 0) {
+    return npos;
+  }
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1) return find_first_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (stringpiece_ssize_type i = pos; i < length_; ++i) {
+    if (lookup[static_cast<unsigned char>(ptr_[i])]) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+stringpiece_ssize_type StringPiece::find_first_not_of(StringPiece s,
+                                                      size_type pos) const {
+  if (length_ <= 0) return npos;
+  if (s.length_ <= 0) return 0;
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1) return find_first_not_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (stringpiece_ssize_type i = pos; i < length_; ++i) {
+    if (!lookup[static_cast<unsigned char>(ptr_[i])]) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+stringpiece_ssize_type StringPiece::find_first_not_of(char c,
+                                                      size_type pos) const {
+  if (length_ <= 0) return npos;
+
+  for (; pos < static_cast<size_type>(length_); ++pos) {
+    if (ptr_[pos] != c) {
+      return pos;
+    }
+  }
+  return npos;
+}
+
+stringpiece_ssize_type StringPiece::find_last_of(StringPiece s,
+                                                 size_type pos) const {
+  if (length_ <= 0 || s.length_ <= 0) return npos;
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1) return find_last_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (stringpiece_ssize_type i =
+       std::min(pos, static_cast<size_type>(length_ - 1)); i >= 0; --i) {
+    if (lookup[static_cast<unsigned char>(ptr_[i])]) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+stringpiece_ssize_type StringPiece::find_last_not_of(StringPiece s,
+                                                     size_type pos) const {
+  if (length_ <= 0) return npos;
+
+  stringpiece_ssize_type i = std::min(pos, static_cast<size_type>(length_ - 1));
+  if (s.length_ <= 0) return i;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1) return find_last_not_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (; i >= 0; --i) {
+    if (!lookup[static_cast<unsigned char>(ptr_[i])]) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+stringpiece_ssize_type StringPiece::find_last_not_of(char c,
+                                                     size_type pos) const {
+  if (length_ <= 0) return npos;
+
+  for (stringpiece_ssize_type i =
+       std::min(pos, static_cast<size_type>(length_ - 1)); i >= 0; --i) {
+    if (ptr_[i] != c) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+StringPiece StringPiece::substr(size_type pos, size_type n) const {
+  if (pos > length_) pos = length_;
+  if (n > length_ - pos) n = length_ - pos;
+  return StringPiece(ptr_ + pos, n);
+}
+
+const StringPiece::size_type StringPiece::npos = size_type(-1);
+
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/stringpiece.h b/src/google/protobuf/stubs/stringpiece.h
new file mode 100644
index 0000000..ec3ffd5
--- /dev/null
+++ b/src/google/protobuf/stubs/stringpiece.h
@@ -0,0 +1,453 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A StringPiece points to part or all of a string, Cord, double-quoted string
+// literal, or other string-like object.  A StringPiece does *not* own the
+// string to which it points.  A StringPiece is not null-terminated.
+//
+// You can use StringPiece as a function or method parameter.  A StringPiece
+// parameter can receive a double-quoted string literal argument, a "const
+// char*" argument, a string argument, or a StringPiece argument with no data
+// copying.  Systematic use of StringPiece for arguments reduces data
+// copies and strlen() calls.
+//
+// Prefer passing StringPieces by value:
+//   void MyFunction(StringPiece arg);
+// If circumstances require, you may also pass by const reference:
+//   void MyFunction(const StringPiece& arg);  // not preferred
+// Both of these have the same lifetime semantics.  Passing by value
+// generates slightly smaller code.  For more discussion, see the thread
+// go/stringpiecebyvalue on c-users.
+//
+// StringPiece is also suitable for local variables if you know that
+// the lifetime of the underlying object is longer than the lifetime
+// of your StringPiece variable.
+//
+// Beware of binding a StringPiece to a temporary:
+//   StringPiece sp = obj.MethodReturningString();  // BAD: lifetime problem
+//
+// This code is okay:
+//   string str = obj.MethodReturningString();  // str owns its contents
+//   StringPiece sp(str);  // GOOD, because str outlives sp
+//
+// StringPiece is sometimes a poor choice for a return value and usually a poor
+// choice for a data member.  If you do use a StringPiece this way, it is your
+// responsibility to ensure that the object pointed to by the StringPiece
+// outlives the StringPiece.
+//
+// A StringPiece may represent just part of a string; thus the name "Piece".
+// For example, when splitting a string, vector<StringPiece> is a natural data
+// type for the output.  For another example, a Cord is a non-contiguous,
+// potentially very long string-like object.  The Cord class has an interface
+// that iteratively provides StringPiece objects that point to the
+// successive pieces of a Cord object.
+//
+// A StringPiece is not null-terminated.  If you write code that scans a
+// StringPiece, you must check its length before reading any characters.
+// Common idioms that work on null-terminated strings do not work on
+// StringPiece objects.
+//
+// There are several ways to create a null StringPiece:
+//   StringPiece()
+//   StringPiece(NULL)
+//   StringPiece(NULL, 0)
+// For all of the above, sp.data() == NULL, sp.length() == 0,
+// and sp.empty() == true.  Also, if you create a StringPiece with
+// a non-NULL pointer then sp.data() != NULL.  Once created,
+// sp.data() will stay either NULL or not-NULL, except if you call
+// sp.clear() or sp.set().
+//
+// Thus, you can use StringPiece(NULL) to signal an out-of-band value
+// that is different from other StringPiece values.  This is similar
+// to the way that const char* p1 = NULL; is different from
+// const char* p2 = "";.
+//
+// There are many ways to create an empty StringPiece:
+//   StringPiece()
+//   StringPiece(NULL)
+//   StringPiece(NULL, 0)
+//   StringPiece("")
+//   StringPiece("", 0)
+//   StringPiece("abcdef", 0)
+//   StringPiece("abcdef"+6, 0)
+// For all of the above, sp.length() will be 0 and sp.empty() will be true.
+// For some empty StringPiece values, sp.data() will be NULL.
+// For some empty StringPiece values, sp.data() will not be NULL.
+//
+// Be careful not to confuse a null StringPiece with an empty StringPiece.
+// The set of empty StringPieces properly includes the set of null StringPieces.
+// That is, every null StringPiece is an empty StringPiece,
+// but some non-null StringPieces are empty StringPieces too.
+//
+// All empty StringPiece values compare equal to each other.
+// Even a null StringPiece compares equal to a non-null empty StringPiece:
+//  StringPiece() == StringPiece("", 0)
+//  StringPiece(NULL) == StringPiece("abc", 0)
+//  StringPiece(NULL, 0) == StringPiece("abcdef"+6, 0)
+//
+// Look carefully at this example:
+//   StringPiece("") == NULL
+// True or false?  TRUE, because StringPiece::operator== converts
+// the right-hand side from NULL to StringPiece(NULL),
+// and then compares two zero-length spans of characters.
+// However, we are working to make this example produce a compile error.
+//
+// Suppose you want to write:
+//   bool TestWhat?(StringPiece sp) { return sp == NULL; }  // BAD
+// Do not do that.  Write one of these instead:
+//   bool TestNull(StringPiece sp) { return sp.data() == NULL; }
+//   bool TestEmpty(StringPiece sp) { return sp.empty(); }
+// The intent of TestWhat? is unclear.  Did you mean TestNull or TestEmpty?
+// Right now, TestWhat? behaves like TestEmpty.
+// We are working to make TestWhat? produce a compile error.
+// TestNull is good to test for an out-of-band signal.
+// TestEmpty is good to test for an empty StringPiece.
+//
+// Caveats (again):
+// (1) The lifetime of the pointed-to string (or piece of a string)
+//     must be longer than the lifetime of the StringPiece.
+// (2) There may or may not be a '\0' character after the end of
+//     StringPiece data.
+// (3) A null StringPiece is empty.
+//     An empty StringPiece may or may not be a null StringPiece.
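+//
+// A minimal usage sketch (illustrative only, not part of the original
+// documentation; LooksLikeComment and ReadInput are hypothetical helpers):
+//
+//   bool LooksLikeComment(StringPiece line) {
+//     return line.starts_with("//") || line.starts_with("#");
+//   }
+//
+//   string owned_text = ReadInput();   // owns the bytes
+//   StringPiece line(owned_text);      // GOOD: owned_text outlives line
+//   line.Consume("  ");                // drops the prefix if present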
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_
+#define GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <string.h>
+#include <iosfwd>
+#include <limits>
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/hash.h>
+
+namespace google {
+namespace protobuf {
+// StringPiece has *two* size types.
+// StringPiece::size_type
+//   is unsigned
+//   is 32 bits in LP32, 64 bits in LP64, 64 bits in LLP64
+//   no future changes intended
+// stringpiece_ssize_type
+//   is signed
+//   is 32 bits in LP32, 64 bits in LP64, 64 bits in LLP64
+//   future changes intended: http://go/64BitStringPiece
+//
+typedef string::difference_type stringpiece_ssize_type;
+
+// STRINGPIECE_CHECK_SIZE protects us from 32-bit overflows.
+// TODO(mec): delete this after stringpiece_ssize_type goes 64 bit.
+#if !defined(NDEBUG)
+#define STRINGPIECE_CHECK_SIZE 1
+#elif defined(_FORTIFY_SOURCE) && _FORTIFY_SOURCE > 0
+#define STRINGPIECE_CHECK_SIZE 1
+#else
+#define STRINGPIECE_CHECK_SIZE 0
+#endif
+
+class LIBPROTOBUF_EXPORT StringPiece {
+ private:
+  const char* ptr_;
+  stringpiece_ssize_type length_;
+
+  // Prevent overflow in debug mode or fortified mode.
+  // sizeof(stringpiece_ssize_type) may be smaller than sizeof(size_t).
+  static stringpiece_ssize_type CheckedSsizeTFromSizeT(size_t size) {
+#if STRINGPIECE_CHECK_SIZE > 0
+#ifdef max
+#undef max
+#endif
+    if (size > static_cast<size_t>(
+        std::numeric_limits<stringpiece_ssize_type>::max())) {
+      // Some people grep for this message in logs
+      // so take care if you ever change it.
+      LogFatalSizeTooBig(size, "size_t to int conversion");
+    }
+#endif
+    return static_cast<stringpiece_ssize_type>(size);
+  }
+
+  // Out-of-line error path.
+  static void LogFatalSizeTooBig(size_t size, const char* details);
+
+ public:
+  // We provide non-explicit singleton constructors so users can pass
+  // in a "const char*" or a "string" wherever a "StringPiece" is
+  // expected.
+  //
+  // Style guide exception granted:
+  // http://goto/style-guide-exception-20978288
+  StringPiece() : ptr_(NULL), length_(0) {}
+
+  StringPiece(const char* str)  // NOLINT(runtime/explicit)
+      : ptr_(str), length_(0) {
+    if (str != NULL) {
+      length_ = CheckedSsizeTFromSizeT(strlen(str));
+    }
+  }
+
+  template <class Allocator>
+  StringPiece(  // NOLINT(runtime/explicit)
+      const std::basic_string<char, std::char_traits<char>, Allocator>& str)
+      : ptr_(str.data()), length_(0) {
+    length_ = CheckedSsizeTFromSizeT(str.size());
+  }
+#if defined(HAS_GLOBAL_STRING)
+  template <class Allocator>
+  StringPiece(  // NOLINT(runtime/explicit)
+      const basic_string<char, std::char_traits<char>, Allocator>& str)
+      : ptr_(str.data()), length_(0) {
+    length_ = CheckedSsizeTFromSizeT(str.size());
+  }
+#endif
+
+  StringPiece(const char* offset, stringpiece_ssize_type len)
+      : ptr_(offset), length_(len) {
+    assert(len >= 0);
+  }
+
+  // Substring of another StringPiece.
+  // pos must be non-negative and <= x.length().
+  StringPiece(StringPiece x, stringpiece_ssize_type pos);
+  // Substring of another StringPiece.
+  // pos must be non-negative and <= x.length().
+  // len must be non-negative and will be pinned to at most x.length() - pos.
+  StringPiece(StringPiece x,
+              stringpiece_ssize_type pos,
+              stringpiece_ssize_type len);
+
+  // data() may return a pointer to a buffer with embedded NULs, and the
+  // returned buffer may or may not be null terminated.  Therefore it is
+  // typically a mistake to pass data() to a routine that expects a NUL
+  // terminated string.
+  const char* data() const { return ptr_; }
+  stringpiece_ssize_type size() const { return length_; }
+  stringpiece_ssize_type length() const { return length_; }
+  bool empty() const { return length_ == 0; }
+
+  void clear() {
+    ptr_ = NULL;
+    length_ = 0;
+  }
+
+  void set(const char* data, stringpiece_ssize_type len) {
+    assert(len >= 0);
+    ptr_ = data;
+    length_ = len;
+  }
+
+  void set(const char* str) {
+    ptr_ = str;
+    if (str != NULL)
+      length_ = CheckedSsizeTFromSizeT(strlen(str));
+    else
+      length_ = 0;
+  }
+
+  void set(const void* data, stringpiece_ssize_type len) {
+    ptr_ = reinterpret_cast<const char*>(data);
+    length_ = len;
+  }
+
+  char operator[](stringpiece_ssize_type i) const {
+    assert(0 <= i);
+    assert(i < length_);
+    return ptr_[i];
+  }
+
+  void remove_prefix(stringpiece_ssize_type n) {
+    assert(length_ >= n);
+    ptr_ += n;
+    length_ -= n;
+  }
+
+  void remove_suffix(stringpiece_ssize_type n) {
+    assert(length_ >= n);
+    length_ -= n;
+  }
+
+  // returns {-1, 0, 1}
+  int compare(StringPiece x) const {
+    const stringpiece_ssize_type min_size =
+        length_ < x.length_ ? length_ : x.length_;
+    int r = memcmp(ptr_, x.ptr_, min_size);
+    if (r < 0) return -1;
+    if (r > 0) return 1;
+    if (length_ < x.length_) return -1;
+    if (length_ > x.length_) return 1;
+    return 0;
+  }
+
+  string as_string() const {
+    return ToString();
+  }
+  // We also define ToString() here, since many other string-like
+  // interfaces name the routine that converts to a C++ string
+  // "ToString", and it's confusing to have the method that does that
+  // for a StringPiece be called "as_string()".  We also leave the
+  // "as_string()" method defined here for existing code.
+  string ToString() const {
+    if (ptr_ == NULL) return string();
+    return string(data(), size());
+  }
+
+  operator string() const {
+    return ToString();
+  }
+
+  void CopyToString(string* target) const;
+  void AppendToString(string* target) const;
+
+  bool starts_with(StringPiece x) const {
+    return (length_ >= x.length_) && (memcmp(ptr_, x.ptr_, x.length_) == 0);
+  }
+
+  bool ends_with(StringPiece x) const {
+    return ((length_ >= x.length_) &&
+            (memcmp(ptr_ + (length_-x.length_), x.ptr_, x.length_) == 0));
+  }
+
+  // Checks whether StringPiece starts with x and if so advances the beginning
+  // of it to past the match.  It's basically a shortcut for starts_with
+  // followed by remove_prefix.
+  bool Consume(StringPiece x);
+  // Like above but for the end of the string.
+  bool ConsumeFromEnd(StringPiece x);
+
+  // standard STL container boilerplate
+  typedef char value_type;
+  typedef const char* pointer;
+  typedef const char& reference;
+  typedef const char& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+  static const size_type npos;
+  typedef const char* const_iterator;
+  typedef const char* iterator;
+  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+  typedef std::reverse_iterator<iterator> reverse_iterator;
+  iterator begin() const { return ptr_; }
+  iterator end() const { return ptr_ + length_; }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(ptr_ + length_);
+  }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(ptr_);
+  }
+  stringpiece_ssize_type max_size() const { return length_; }
+  stringpiece_ssize_type capacity() const { return length_; }
+
+  // cpplint.py emits a false positive [build/include_what_you_use]
+  stringpiece_ssize_type copy(char* buf, size_type n, size_type pos = 0) const;  // NOLINT
+
+  bool contains(StringPiece s) const;
+
+  stringpiece_ssize_type find(StringPiece s, size_type pos = 0) const;
+  stringpiece_ssize_type find(char c, size_type pos = 0) const;
+  stringpiece_ssize_type rfind(StringPiece s, size_type pos = npos) const;
+  stringpiece_ssize_type rfind(char c, size_type pos = npos) const;
+
+  stringpiece_ssize_type find_first_of(StringPiece s, size_type pos = 0) const;
+  stringpiece_ssize_type find_first_of(char c, size_type pos = 0) const {
+    return find(c, pos);
+  }
+  stringpiece_ssize_type find_first_not_of(StringPiece s,
+                                           size_type pos = 0) const;
+  stringpiece_ssize_type find_first_not_of(char c, size_type pos = 0) const;
+  stringpiece_ssize_type find_last_of(StringPiece s,
+                                      size_type pos = npos) const;
+  stringpiece_ssize_type find_last_of(char c, size_type pos = npos) const {
+    return rfind(c, pos);
+  }
+  stringpiece_ssize_type find_last_not_of(StringPiece s,
+                                          size_type pos = npos) const;
+  stringpiece_ssize_type find_last_not_of(char c, size_type pos = npos) const;
+
+  StringPiece substr(size_type pos, size_type n = npos) const;
+};
+
+// This large function is defined inline so that in a fairly common case where
+// one of the arguments is a literal, the compiler can elide a lot of the
+// following comparisons.
+inline bool operator==(StringPiece x, StringPiece y) {
+  stringpiece_ssize_type len = x.size();
+  if (len != y.size()) {
+    return false;
+  }
+
+  return x.data() == y.data() || len <= 0 ||
+      memcmp(x.data(), y.data(), len) == 0;
+}
+
+inline bool operator!=(StringPiece x, StringPiece y) {
+  return !(x == y);
+}
+
+inline bool operator<(StringPiece x, StringPiece y) {
+  const stringpiece_ssize_type min_size =
+      x.size() < y.size() ? x.size() : y.size();
+  const int r = memcmp(x.data(), y.data(), min_size);
+  return (r < 0) || (r == 0 && x.size() < y.size());
+}
+
+inline bool operator>(StringPiece x, StringPiece y) {
+  return y < x;
+}
+
+inline bool operator<=(StringPiece x, StringPiece y) {
+  return !(x > y);
+}
+
+inline bool operator>=(StringPiece x, StringPiece y) {
+  return !(x < y);
+}
+
+// allow StringPiece to be logged
+extern std::ostream& operator<<(std::ostream& o, StringPiece piece);
+
+}  // namespace protobuf
+}  // namespace google
+
+GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START
+template<> struct hash<StringPiece> {
+  size_t operator()(const StringPiece& s) const {
+    size_t result = 0;
+    for (const char *str = s.data(), *end = str + s.size(); str < end; str++) {
+      result = 5 * result + *str;
+    }
+    return result;
+  }
+};
+GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END
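+
+// Illustrative only (not part of the original header): with this hash
+// specialization in scope, StringPiece can be used as a key in a hash
+// container, provided the referenced characters outlive the container:
+//
+//   hash_map<StringPiece, int> counts;
+//   counts[StringPiece("name")] += 1;   // "name" is a string literal, so it
+//                                       // outlives the map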
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_
diff --git a/src/google/protobuf/stubs/stringpiece_unittest.cc b/src/google/protobuf/stubs/stringpiece_unittest.cc
new file mode 100644
index 0000000..a52d81f
--- /dev/null
+++ b/src/google/protobuf/stubs/stringpiece_unittest.cc
@@ -0,0 +1,794 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <google/protobuf/stubs/stringpiece.h>
+
+#include <iterator>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <google/protobuf/testing/googletest.h>
+#include <google/protobuf/stubs/hash.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace {
+TEST(StringPiece, Ctor) {
+  {
+    // Null.
+    StringPiece s10;
+    EXPECT_TRUE(s10.data() == NULL);
+    EXPECT_EQ(0, s10.length());
+  }
+
+  {
+    // const char* without length.
+    const char* hello = "hello";
+    StringPiece s20(hello);
+    EXPECT_TRUE(s20.data() == hello);
+    EXPECT_EQ(5, s20.length());
+
+    // const char* with length.
+    StringPiece s21(hello, 4);
+    EXPECT_TRUE(s21.data() == hello);
+    EXPECT_EQ(4, s21.length());
+
+    // Not recommended, but valid C++
+    StringPiece s22(hello, 6);
+    EXPECT_TRUE(s22.data() == hello);
+    EXPECT_EQ(6, s22.length());
+  }
+
+  {
+    // std::string.
+    std::string hola = "hola";
+    StringPiece s30(hola);
+    EXPECT_TRUE(s30.data() == hola.data());
+    EXPECT_EQ(4, s30.length());
+
+    // std::string with embedded '\0'.
+    hola.push_back('\0');
+    hola.append("h2");
+    hola.push_back('\0');
+    StringPiece s31(hola);
+    EXPECT_TRUE(s31.data() == hola.data());
+    EXPECT_EQ(8, s31.length());
+  }
+
+#if defined(HAS_GLOBAL_STRING)
+  {
+    // ::string
+    string bonjour = "bonjour";
+    StringPiece s40(bonjour);
+    EXPECT_TRUE(s40.data() == bonjour.data());
+    EXPECT_EQ(7, s40.length());
+  }
+#endif
+
+  // TODO(mec): StringPiece(StringPiece x, int pos);
+  // TODO(mec): StringPiece(StringPiece x, int pos, int len);
+  // TODO(mec): StringPiece(const StringPiece&);
+}
+
+TEST(StringPiece, STLComparator) {
+  string s1("foo");
+  string s2("bar");
+  string s3("baz");
+
+  StringPiece p1(s1);
+  StringPiece p2(s2);
+  StringPiece p3(s3);
+
+  typedef std::map<StringPiece, int> TestMap;
+  TestMap map;
+
+  map.insert(std::make_pair(p1, 0));
+  map.insert(std::make_pair(p2, 1));
+  map.insert(std::make_pair(p3, 2));
+  EXPECT_EQ(map.size(), 3);
+
+  TestMap::const_iterator iter = map.begin();
+  EXPECT_EQ(iter->second, 1);
+  ++iter;
+  EXPECT_EQ(iter->second, 2);
+  ++iter;
+  EXPECT_EQ(iter->second, 0);
+  ++iter;
+  EXPECT_TRUE(iter == map.end());
+
+  TestMap::iterator new_iter = map.find("zot");
+  EXPECT_TRUE(new_iter == map.end());
+
+  new_iter = map.find("bar");
+  EXPECT_TRUE(new_iter != map.end());
+
+  map.erase(new_iter);
+  EXPECT_EQ(map.size(), 2);
+
+  iter = map.begin();
+  EXPECT_EQ(iter->second, 2);
+  ++iter;
+  EXPECT_EQ(iter->second, 0);
+  ++iter;
+  EXPECT_TRUE(iter == map.end());
+}
+
+TEST(StringPiece, ComparisonOperators) {
+#define COMPARE(result, op, x, y) \
+  EXPECT_EQ(result, StringPiece((x)) op StringPiece((y))); \
+  EXPECT_EQ(result, StringPiece((x)).compare(StringPiece((y))) op 0)
+
+  COMPARE(true, ==, "",   "");
+  COMPARE(true, ==, "", NULL);
+  COMPARE(true, ==, NULL, "");
+  COMPARE(true, ==, "a",  "a");
+  COMPARE(true, ==, "aa", "aa");
+  COMPARE(false, ==, "a",  "");
+  COMPARE(false, ==, "",   "a");
+  COMPARE(false, ==, "a",  "b");
+  COMPARE(false, ==, "a",  "aa");
+  COMPARE(false, ==, "aa", "a");
+
+  COMPARE(false, !=, "",   "");
+  COMPARE(false, !=, "a",  "a");
+  COMPARE(false, !=, "aa", "aa");
+  COMPARE(true, !=, "a",  "");
+  COMPARE(true, !=, "",   "a");
+  COMPARE(true, !=, "a",  "b");
+  COMPARE(true, !=, "a",  "aa");
+  COMPARE(true, !=, "aa", "a");
+
+  COMPARE(true, <, "a",  "b");
+  COMPARE(true, <, "a",  "aa");
+  COMPARE(true, <, "aa", "b");
+  COMPARE(true, <, "aa", "bb");
+  COMPARE(false, <, "a",  "a");
+  COMPARE(false, <, "b",  "a");
+  COMPARE(false, <, "aa", "a");
+  COMPARE(false, <, "b",  "aa");
+  COMPARE(false, <, "bb", "aa");
+
+  COMPARE(true, <=, "a",  "a");
+  COMPARE(true, <=, "a",  "b");
+  COMPARE(true, <=, "a",  "aa");
+  COMPARE(true, <=, "aa", "b");
+  COMPARE(true, <=, "aa", "bb");
+  COMPARE(false, <=, "b",  "a");
+  COMPARE(false, <=, "aa", "a");
+  COMPARE(false, <=, "b",  "aa");
+  COMPARE(false, <=, "bb", "aa");
+
+  COMPARE(false, >=, "a",  "b");
+  COMPARE(false, >=, "a",  "aa");
+  COMPARE(false, >=, "aa", "b");
+  COMPARE(false, >=, "aa", "bb");
+  COMPARE(true, >=, "a",  "a");
+  COMPARE(true, >=, "b",  "a");
+  COMPARE(true, >=, "aa", "a");
+  COMPARE(true, >=, "b",  "aa");
+  COMPARE(true, >=, "bb", "aa");
+
+  COMPARE(false, >, "a",  "a");
+  COMPARE(false, >, "a",  "b");
+  COMPARE(false, >, "a",  "aa");
+  COMPARE(false, >, "aa", "b");
+  COMPARE(false, >, "aa", "bb");
+  COMPARE(true, >, "b",  "a");
+  COMPARE(true, >, "aa", "a");
+  COMPARE(true, >, "b",  "aa");
+  COMPARE(true, >, "bb", "aa");
+
+  string x;
+  for (int i = 0; i < 256; i++) {
+    x += 'a';
+    string y = x;
+    COMPARE(true, ==, x, y);
+    for (int j = 0; j < i; j++) {
+      string z = x;
+      z[j] = 'b';       // Differs in position 'j'
+      COMPARE(false, ==, x, z);
+      COMPARE(true, <, x, z);
+      COMPARE(true, >, z, x);
+      if (j + 1 < i) {
+        z[j + 1] = 'A';  // Differs in position 'j+1' as well
+        COMPARE(false, ==, x, z);
+        COMPARE(true, <, x, z);
+        COMPARE(true, >, z, x);
+        z[j + 1] = 'z';  // Differs in position 'j+1' as well
+        COMPARE(false, ==, x, z);
+        COMPARE(true, <, x, z);
+        COMPARE(true, >, z, x);
+      }
+    }
+  }
+
+#undef COMPARE
+}
+
+TEST(StringPiece, STL1) {
+  const StringPiece a("abcdefghijklmnopqrstuvwxyz");
+  const StringPiece b("abc");
+  const StringPiece c("xyz");
+  const StringPiece d("foobar");
+  const StringPiece e;
+  string temp("123");
+  temp += '\0';
+  temp += "456";
+  const StringPiece f(temp);
+
+  EXPECT_EQ(a[6], 'g');
+  EXPECT_EQ(b[0], 'a');
+  EXPECT_EQ(c[2], 'z');
+  EXPECT_EQ(f[3], '\0');
+  EXPECT_EQ(f[5], '5');
+
+  EXPECT_EQ(*d.data(), 'f');
+  EXPECT_EQ(d.data()[5], 'r');
+  EXPECT_TRUE(e.data() == NULL);
+
+  EXPECT_EQ(*a.begin(), 'a');
+  EXPECT_EQ(*(b.begin() + 2), 'c');
+  EXPECT_EQ(*(c.end() - 1), 'z');
+
+  EXPECT_EQ(*a.rbegin(), 'z');
+  EXPECT_EQ(*(b.rbegin() + 2), 'a');
+  EXPECT_EQ(*(c.rend() - 1), 'x');
+  EXPECT_TRUE(a.rbegin() + 26 == a.rend());
+
+  EXPECT_EQ(a.size(), 26);
+  EXPECT_EQ(b.size(), 3);
+  EXPECT_EQ(c.size(), 3);
+  EXPECT_EQ(d.size(), 6);
+  EXPECT_EQ(e.size(), 0);
+  EXPECT_EQ(f.size(), 7);
+
+  EXPECT_TRUE(!d.empty());
+  EXPECT_TRUE(d.begin() != d.end());
+  EXPECT_TRUE(d.begin() + 6 == d.end());
+
+  EXPECT_TRUE(e.empty());
+  EXPECT_TRUE(e.begin() == e.end());
+
+  EXPECT_GE(a.max_size(), a.capacity());
+  EXPECT_GE(a.capacity(), a.size());
+
+  char buf[4] = { '%', '%', '%', '%' };
+  EXPECT_EQ(a.copy(buf, 4), 4);
+  EXPECT_EQ(buf[0], a[0]);
+  EXPECT_EQ(buf[1], a[1]);
+  EXPECT_EQ(buf[2], a[2]);
+  EXPECT_EQ(buf[3], a[3]);
+  EXPECT_EQ(a.copy(buf, 3, 7), 3);
+  EXPECT_EQ(buf[0], a[7]);
+  EXPECT_EQ(buf[1], a[8]);
+  EXPECT_EQ(buf[2], a[9]);
+  EXPECT_EQ(buf[3], a[3]);
+  EXPECT_EQ(c.copy(buf, 99), 3);
+  EXPECT_EQ(buf[0], c[0]);
+  EXPECT_EQ(buf[1], c[1]);
+  EXPECT_EQ(buf[2], c[2]);
+  EXPECT_EQ(buf[3], a[3]);
+}
+
+// Separated from STL1() because some compilers produce an overly
+// large stack frame for the combined function.
+TEST(StringPiece, STL2) {
+  const StringPiece a("abcdefghijklmnopqrstuvwxyz");
+  const StringPiece b("abc");
+  const StringPiece c("xyz");
+  StringPiece d("foobar");
+  const StringPiece e;
+  const StringPiece f("123" "\0" "456", 7);
+
+  d.clear();
+  EXPECT_EQ(d.size(), 0);
+  EXPECT_TRUE(d.empty());
+  EXPECT_TRUE(d.data() == NULL);
+  EXPECT_TRUE(d.begin() == d.end());
+
+  EXPECT_EQ(StringPiece::npos, string::npos);
+
+  EXPECT_EQ(a.find(b), 0);
+  EXPECT_EQ(a.find(b, 1), StringPiece::npos);
+  EXPECT_EQ(a.find(c), 23);
+  EXPECT_EQ(a.find(c, 9), 23);
+  EXPECT_EQ(a.find(c, StringPiece::npos), StringPiece::npos);
+  EXPECT_EQ(b.find(c), StringPiece::npos);
+  EXPECT_EQ(b.find(c, StringPiece::npos), StringPiece::npos);
+  EXPECT_EQ(a.find(d), 0);
+  EXPECT_EQ(a.find(e), 0);
+  EXPECT_EQ(a.find(d, 12), 12);
+  EXPECT_EQ(a.find(e, 17), 17);
+  StringPiece g("xx not found bb");
+  EXPECT_EQ(a.find(g), StringPiece::npos);
+  // empty string nonsense
+  EXPECT_EQ(d.find(b), StringPiece::npos);
+  EXPECT_EQ(e.find(b), StringPiece::npos);
+  EXPECT_EQ(d.find(b, 4), StringPiece::npos);
+  EXPECT_EQ(e.find(b, 7), StringPiece::npos);
+
+  size_t empty_search_pos = string().find(string());
+  EXPECT_EQ(d.find(d), empty_search_pos);
+  EXPECT_EQ(d.find(e), empty_search_pos);
+  EXPECT_EQ(e.find(d), empty_search_pos);
+  EXPECT_EQ(e.find(e), empty_search_pos);
+  EXPECT_EQ(d.find(d, 4), string().find(string(), 4));
+  EXPECT_EQ(d.find(e, 4), string().find(string(), 4));
+  EXPECT_EQ(e.find(d, 4), string().find(string(), 4));
+  EXPECT_EQ(e.find(e, 4), string().find(string(), 4));
+
+  EXPECT_EQ(a.find('a'), 0);
+  EXPECT_EQ(a.find('c'), 2);
+  EXPECT_EQ(a.find('z'), 25);
+  EXPECT_EQ(a.find('$'), StringPiece::npos);
+  EXPECT_EQ(a.find('\0'), StringPiece::npos);
+  EXPECT_EQ(f.find('\0'), 3);
+  EXPECT_EQ(f.find('3'), 2);
+  EXPECT_EQ(f.find('5'), 5);
+  EXPECT_EQ(g.find('o'), 4);
+  EXPECT_EQ(g.find('o', 4), 4);
+  EXPECT_EQ(g.find('o', 5), 8);
+  EXPECT_EQ(a.find('b', 5), StringPiece::npos);
+  // empty string nonsense
+  EXPECT_EQ(d.find('\0'), StringPiece::npos);
+  EXPECT_EQ(e.find('\0'), StringPiece::npos);
+  EXPECT_EQ(d.find('\0', 4), StringPiece::npos);
+  EXPECT_EQ(e.find('\0', 7), StringPiece::npos);
+  EXPECT_EQ(d.find('x'), StringPiece::npos);
+  EXPECT_EQ(e.find('x'), StringPiece::npos);
+  EXPECT_EQ(d.find('x', 4), StringPiece::npos);
+  EXPECT_EQ(e.find('x', 7), StringPiece::npos);
+
+  EXPECT_EQ(a.rfind(b), 0);
+  EXPECT_EQ(a.rfind(b, 1), 0);
+  EXPECT_EQ(a.rfind(c), 23);
+  EXPECT_EQ(a.rfind(c, 22), StringPiece::npos);
+  EXPECT_EQ(a.rfind(c, 1), StringPiece::npos);
+  EXPECT_EQ(a.rfind(c, 0), StringPiece::npos);
+  EXPECT_EQ(b.rfind(c), StringPiece::npos);
+  EXPECT_EQ(b.rfind(c, 0), StringPiece::npos);
+  EXPECT_EQ(a.rfind(d), a.as_string().rfind(string()));
+  EXPECT_EQ(a.rfind(e), a.as_string().rfind(string()));
+  EXPECT_EQ(a.rfind(d, 12), 12);
+  EXPECT_EQ(a.rfind(e, 17), 17);
+  EXPECT_EQ(a.rfind(g), StringPiece::npos);
+  EXPECT_EQ(d.rfind(b), StringPiece::npos);
+  EXPECT_EQ(e.rfind(b), StringPiece::npos);
+  EXPECT_EQ(d.rfind(b, 4), StringPiece::npos);
+  EXPECT_EQ(e.rfind(b, 7), StringPiece::npos);
+  // empty string nonsense
+  EXPECT_EQ(d.rfind(d, 4), string().rfind(string()));
+  EXPECT_EQ(e.rfind(d, 7), string().rfind(string()));
+  EXPECT_EQ(d.rfind(e, 4), string().rfind(string()));
+  EXPECT_EQ(e.rfind(e, 7), string().rfind(string()));
+  EXPECT_EQ(d.rfind(d), string().rfind(string()));
+  EXPECT_EQ(e.rfind(d), string().rfind(string()));
+  EXPECT_EQ(d.rfind(e), string().rfind(string()));
+  EXPECT_EQ(e.rfind(e), string().rfind(string()));
+
+  EXPECT_EQ(g.rfind('o'), 8);
+  EXPECT_EQ(g.rfind('q'), StringPiece::npos);
+  EXPECT_EQ(g.rfind('o', 8), 8);
+  EXPECT_EQ(g.rfind('o', 7), 4);
+  EXPECT_EQ(g.rfind('o', 3), StringPiece::npos);
+  EXPECT_EQ(f.rfind('\0'), 3);
+  EXPECT_EQ(f.rfind('\0', 12), 3);
+  EXPECT_EQ(f.rfind('3'), 2);
+  EXPECT_EQ(f.rfind('5'), 5);
+  // empty string nonsense
+  EXPECT_EQ(d.rfind('o'), StringPiece::npos);
+  EXPECT_EQ(e.rfind('o'), StringPiece::npos);
+  EXPECT_EQ(d.rfind('o', 4), StringPiece::npos);
+  EXPECT_EQ(e.rfind('o', 7), StringPiece::npos);
+
+  EXPECT_EQ(a.find_first_of(b), 0);
+  EXPECT_EQ(a.find_first_of(b, 0), 0);
+  EXPECT_EQ(a.find_first_of(b, 1), 1);
+  EXPECT_EQ(a.find_first_of(b, 2), 2);
+  EXPECT_EQ(a.find_first_of(b, 3), StringPiece::npos);
+  EXPECT_EQ(a.find_first_of(c), 23);
+  EXPECT_EQ(a.find_first_of(c, 23), 23);
+  EXPECT_EQ(a.find_first_of(c, 24), 24);
+  EXPECT_EQ(a.find_first_of(c, 25), 25);
+  EXPECT_EQ(a.find_first_of(c, 26), StringPiece::npos);
+  EXPECT_EQ(g.find_first_of(b), 13);
+  EXPECT_EQ(g.find_first_of(c), 0);
+  EXPECT_EQ(a.find_first_of(f), StringPiece::npos);
+  EXPECT_EQ(f.find_first_of(a), StringPiece::npos);
+  // empty string nonsense
+  EXPECT_EQ(a.find_first_of(d), StringPiece::npos);
+  EXPECT_EQ(a.find_first_of(e), StringPiece::npos);
+  EXPECT_EQ(d.find_first_of(b), StringPiece::npos);
+  EXPECT_EQ(e.find_first_of(b), StringPiece::npos);
+  EXPECT_EQ(d.find_first_of(d), StringPiece::npos);
+  EXPECT_EQ(e.find_first_of(d), StringPiece::npos);
+  EXPECT_EQ(d.find_first_of(e), StringPiece::npos);
+  EXPECT_EQ(e.find_first_of(e), StringPiece::npos);
+
+  EXPECT_EQ(a.find_first_not_of(b), 3);
+  EXPECT_EQ(a.find_first_not_of(c), 0);
+  EXPECT_EQ(b.find_first_not_of(a), StringPiece::npos);
+  EXPECT_EQ(c.find_first_not_of(a), StringPiece::npos);
+  EXPECT_EQ(f.find_first_not_of(a), 0);
+  EXPECT_EQ(a.find_first_not_of(f), 0);
+  EXPECT_EQ(a.find_first_not_of(d), 0);
+  EXPECT_EQ(a.find_first_not_of(e), 0);
+  // empty string nonsense
+  EXPECT_EQ(d.find_first_not_of(a), StringPiece::npos);
+  EXPECT_EQ(e.find_first_not_of(a), StringPiece::npos);
+  EXPECT_EQ(d.find_first_not_of(d), StringPiece::npos);
+  EXPECT_EQ(e.find_first_not_of(d), StringPiece::npos);
+  EXPECT_EQ(d.find_first_not_of(e), StringPiece::npos);
+  EXPECT_EQ(e.find_first_not_of(e), StringPiece::npos);
+
+  StringPiece h("====");
+  EXPECT_EQ(h.find_first_not_of('='), StringPiece::npos);
+  EXPECT_EQ(h.find_first_not_of('=', 3), StringPiece::npos);
+  EXPECT_EQ(h.find_first_not_of('\0'), 0);
+  EXPECT_EQ(g.find_first_not_of('x'), 2);
+  EXPECT_EQ(f.find_first_not_of('\0'), 0);
+  EXPECT_EQ(f.find_first_not_of('\0', 3), 4);
+  EXPECT_EQ(f.find_first_not_of('\0', 2), 2);
+  // empty string nonsense
+  EXPECT_EQ(d.find_first_not_of('x'), StringPiece::npos);
+  EXPECT_EQ(e.find_first_not_of('x'), StringPiece::npos);
+  EXPECT_EQ(d.find_first_not_of('\0'), StringPiece::npos);
+  EXPECT_EQ(e.find_first_not_of('\0'), StringPiece::npos);
+
+  //  StringPiece g("xx not found bb");
+  StringPiece i("56");
+  EXPECT_EQ(h.find_last_of(a), StringPiece::npos);
+  EXPECT_EQ(g.find_last_of(a), g.size()-1);
+  EXPECT_EQ(a.find_last_of(b), 2);
+  EXPECT_EQ(a.find_last_of(c), a.size()-1);
+  EXPECT_EQ(f.find_last_of(i), 6);
+  EXPECT_EQ(a.find_last_of('a'), 0);
+  EXPECT_EQ(a.find_last_of('b'), 1);
+  EXPECT_EQ(a.find_last_of('z'), 25);
+  EXPECT_EQ(a.find_last_of('a', 5), 0);
+  EXPECT_EQ(a.find_last_of('b', 5), 1);
+  EXPECT_EQ(a.find_last_of('b', 0), StringPiece::npos);
+  EXPECT_EQ(a.find_last_of('z', 25), 25);
+  EXPECT_EQ(a.find_last_of('z', 24), StringPiece::npos);
+  EXPECT_EQ(f.find_last_of(i, 5), 5);
+  EXPECT_EQ(f.find_last_of(i, 6), 6);
+  EXPECT_EQ(f.find_last_of(a, 4), StringPiece::npos);
+  // empty string nonsense
+  EXPECT_EQ(f.find_last_of(d), StringPiece::npos);
+  EXPECT_EQ(f.find_last_of(e), StringPiece::npos);
+  EXPECT_EQ(f.find_last_of(d, 4), StringPiece::npos);
+  EXPECT_EQ(f.find_last_of(e, 4), StringPiece::npos);
+  EXPECT_EQ(d.find_last_of(d), StringPiece::npos);
+  EXPECT_EQ(d.find_last_of(e), StringPiece::npos);
+  EXPECT_EQ(e.find_last_of(d), StringPiece::npos);
+  EXPECT_EQ(e.find_last_of(e), StringPiece::npos);
+  EXPECT_EQ(d.find_last_of(f), StringPiece::npos);
+  EXPECT_EQ(e.find_last_of(f), StringPiece::npos);
+  EXPECT_EQ(d.find_last_of(d, 4), StringPiece::npos);
+  EXPECT_EQ(d.find_last_of(e, 4), StringPiece::npos);
+  EXPECT_EQ(e.find_last_of(d, 4), StringPiece::npos);
+  EXPECT_EQ(e.find_last_of(e, 4), StringPiece::npos);
+  EXPECT_EQ(d.find_last_of(f, 4), StringPiece::npos);
+  EXPECT_EQ(e.find_last_of(f, 4), StringPiece::npos);
+
+  EXPECT_EQ(a.find_last_not_of(b), a.size()-1);
+  EXPECT_EQ(a.find_last_not_of(c), 22);
+  EXPECT_EQ(b.find_last_not_of(a), StringPiece::npos);
+  EXPECT_EQ(b.find_last_not_of(b), StringPiece::npos);
+  EXPECT_EQ(f.find_last_not_of(i), 4);
+  EXPECT_EQ(a.find_last_not_of(c, 24), 22);
+  EXPECT_EQ(a.find_last_not_of(b, 3), 3);
+  EXPECT_EQ(a.find_last_not_of(b, 2), StringPiece::npos);
+  // empty string nonsense
+  EXPECT_EQ(f.find_last_not_of(d), f.size()-1);
+  EXPECT_EQ(f.find_last_not_of(e), f.size()-1);
+  EXPECT_EQ(f.find_last_not_of(d, 4), 4);
+  EXPECT_EQ(f.find_last_not_of(e, 4), 4);
+  EXPECT_EQ(d.find_last_not_of(d), StringPiece::npos);
+  EXPECT_EQ(d.find_last_not_of(e), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of(d), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of(e), StringPiece::npos);
+  EXPECT_EQ(d.find_last_not_of(f), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of(f), StringPiece::npos);
+  EXPECT_EQ(d.find_last_not_of(d, 4), StringPiece::npos);
+  EXPECT_EQ(d.find_last_not_of(e, 4), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of(d, 4), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of(e, 4), StringPiece::npos);
+  EXPECT_EQ(d.find_last_not_of(f, 4), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of(f, 4), StringPiece::npos);
+
+  EXPECT_EQ(h.find_last_not_of('x'), h.size() - 1);
+  EXPECT_EQ(h.find_last_not_of('='), StringPiece::npos);
+  EXPECT_EQ(b.find_last_not_of('c'), 1);
+  EXPECT_EQ(h.find_last_not_of('x', 2), 2);
+  EXPECT_EQ(h.find_last_not_of('=', 2), StringPiece::npos);
+  EXPECT_EQ(b.find_last_not_of('b', 1), 0);
+  // empty string nonsense
+  EXPECT_EQ(d.find_last_not_of('x'), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of('x'), StringPiece::npos);
+  EXPECT_EQ(d.find_last_not_of('\0'), StringPiece::npos);
+  EXPECT_EQ(e.find_last_not_of('\0'), StringPiece::npos);
+
+  EXPECT_EQ(a.substr(0, 3), b);
+  EXPECT_EQ(a.substr(23), c);
+  EXPECT_EQ(a.substr(23, 3), c);
+  EXPECT_EQ(a.substr(23, 99), c);
+  EXPECT_EQ(a.substr(0), a);
+  EXPECT_EQ(a.substr(3, 2), "de");
+  // empty string nonsense
+  EXPECT_EQ(a.substr(99, 2), e);
+  EXPECT_EQ(d.substr(99), e);
+  EXPECT_EQ(d.substr(0, 99), e);
+  EXPECT_EQ(d.substr(99, 99), e);
+  // use of npos
+  EXPECT_EQ(a.substr(0, StringPiece::npos), a);
+  EXPECT_EQ(a.substr(23, StringPiece::npos), c);
+  EXPECT_EQ(a.substr(StringPiece::npos, 0), e);
+  EXPECT_EQ(a.substr(StringPiece::npos, 1), e);
+  EXPECT_EQ(a.substr(StringPiece::npos, StringPiece::npos), e);
+
+  // Substring constructors.
+  EXPECT_EQ(StringPiece(a, 0, 3), b);
+  EXPECT_EQ(StringPiece(a, 23), c);
+  EXPECT_EQ(StringPiece(a, 23, 3), c);
+  EXPECT_EQ(StringPiece(a, 23, 99), c);
+  EXPECT_EQ(StringPiece(a, 0), a);
+  EXPECT_EQ(StringPiece(a, 3, 2), "de");
+  // empty string nonsense
+  EXPECT_EQ(StringPiece(d, 0, 99), e);
+  // Verify that they work taking an actual string, not just a StringPiece.
+  string a2 = a.as_string();
+  EXPECT_EQ(StringPiece(a2, 0, 3), b);
+  EXPECT_EQ(StringPiece(a2, 23), c);
+  EXPECT_EQ(StringPiece(a2, 23, 3), c);
+  EXPECT_EQ(StringPiece(a2, 23, 99), c);
+  EXPECT_EQ(StringPiece(a2, 0), a);
+  EXPECT_EQ(StringPiece(a2, 3, 2), "de");
+}
+
+TEST(StringPiece, Custom) {
+  StringPiece a("foobar");
+  string s1("123");
+  s1 += '\0';
+  s1 += "456";
+  StringPiece b(s1);
+  StringPiece e;
+  string s2;
+
+  // CopyToString
+  a.CopyToString(&s2);
+  EXPECT_EQ(s2.size(), 6);
+  EXPECT_EQ(s2, "foobar");
+  b.CopyToString(&s2);
+  EXPECT_EQ(s2.size(), 7);
+  EXPECT_EQ(s1, s2);
+  e.CopyToString(&s2);
+  EXPECT_TRUE(s2.empty());
+
+  // AppendToString
+  s2.erase();
+  a.AppendToString(&s2);
+  EXPECT_EQ(s2.size(), 6);
+  EXPECT_EQ(s2, "foobar");
+  a.AppendToString(&s2);
+  EXPECT_EQ(s2.size(), 12);
+  EXPECT_EQ(s2, "foobarfoobar");
+
+  // starts_with
+  EXPECT_TRUE(a.starts_with(a));
+  EXPECT_TRUE(a.starts_with("foo"));
+  EXPECT_TRUE(a.starts_with(e));
+  EXPECT_TRUE(b.starts_with(s1));
+  EXPECT_TRUE(b.starts_with(b));
+  EXPECT_TRUE(b.starts_with(e));
+  EXPECT_TRUE(e.starts_with(""));
+  EXPECT_TRUE(!a.starts_with(b));
+  EXPECT_TRUE(!b.starts_with(a));
+  EXPECT_TRUE(!e.starts_with(a));
+
+  // ends with
+  EXPECT_TRUE(a.ends_with(a));
+  EXPECT_TRUE(a.ends_with("bar"));
+  EXPECT_TRUE(a.ends_with(e));
+  EXPECT_TRUE(b.ends_with(s1));
+  EXPECT_TRUE(b.ends_with(b));
+  EXPECT_TRUE(b.ends_with(e));
+  EXPECT_TRUE(e.ends_with(""));
+  EXPECT_TRUE(!a.ends_with(b));
+  EXPECT_TRUE(!b.ends_with(a));
+  EXPECT_TRUE(!e.ends_with(a));
+
+  // remove_prefix
+  StringPiece c(a);
+  c.remove_prefix(3);
+  EXPECT_EQ(c, "bar");
+  c = a;
+  c.remove_prefix(0);
+  EXPECT_EQ(c, a);
+  c.remove_prefix(c.size());
+  EXPECT_EQ(c, e);
+
+  // remove_suffix
+  c = a;
+  c.remove_suffix(3);
+  EXPECT_EQ(c, "foo");
+  c = a;
+  c.remove_suffix(0);
+  EXPECT_EQ(c, a);
+  c.remove_suffix(c.size());
+  EXPECT_EQ(c, e);
+
+  // set
+  c.set("foobar", 6);
+  EXPECT_EQ(c, a);
+  c.set("foobar", 0);
+  EXPECT_EQ(c, e);
+  c.set("foobar", 7);
+  EXPECT_NE(c, a);
+
+  c.set("foobar");
+  EXPECT_EQ(c, a);
+
+  c.set(static_cast<const void*>("foobar"), 6);
+  EXPECT_EQ(c, a);
+  c.set(static_cast<const void*>("foobar"), 0);
+  EXPECT_EQ(c, e);
+  c.set(static_cast<const void*>("foobar"), 7);
+  EXPECT_NE(c, a);
+
+  // as_string
+  string s3(a.as_string().c_str(), 7);
+  EXPECT_EQ(c, s3);
+  string s4(e.as_string());
+  EXPECT_TRUE(s4.empty());
+
+  // ToString
+  {
+    string s5(a.ToString().c_str(), 7);
+    EXPECT_EQ(c, s5);
+    string s6(e.ToString());
+    EXPECT_TRUE(s6.empty());
+  }
+
+  // Consume
+  a.set("foobar");
+  EXPECT_TRUE(a.Consume("foo"));
+  EXPECT_EQ(a, "bar");
+  EXPECT_FALSE(a.Consume("foo"));
+  EXPECT_FALSE(a.Consume("barbar"));
+  EXPECT_FALSE(a.Consume("ar"));
+  EXPECT_EQ(a, "bar");
+
+  a.set("foobar");
+  EXPECT_TRUE(a.ConsumeFromEnd("bar"));
+  EXPECT_EQ(a, "foo");
+  EXPECT_FALSE(a.ConsumeFromEnd("bar"));
+  EXPECT_FALSE(a.ConsumeFromEnd("foofoo"));
+  EXPECT_FALSE(a.ConsumeFromEnd("fo"));
+  EXPECT_EQ(a, "foo");
+}
+
+TEST(StringPiece, Contains) {
+  StringPiece a("abcdefg");
+  StringPiece b("abcd");
+  StringPiece c("efg");
+  StringPiece d("gh");
+  EXPECT_TRUE(a.contains(b));
+  EXPECT_TRUE(a.contains(c));
+  EXPECT_TRUE(!a.contains(d));
+}
+
+TEST(StringPiece, NULLInput) {
+  // we used to crash here, but now we don't.
+  StringPiece s(NULL);
+  EXPECT_EQ(s.data(), (const char*)NULL);
+  EXPECT_EQ(s.size(), 0);
+
+  s.set(NULL);
+  EXPECT_EQ(s.data(), (const char*)NULL);
+  EXPECT_EQ(s.size(), 0);
+
+  // .ToString() on a StringPiece with NULL should produce the empty string.
+  EXPECT_EQ("", s.ToString());
+  EXPECT_EQ("", s.as_string());
+}
+
+TEST(StringPiece, Comparisons2) {
+  StringPiece abc("abcdefghijklmnopqrstuvwxyz");
+
+  // check comparison operations on strings longer than 4 bytes.
+  EXPECT_EQ(abc, StringPiece("abcdefghijklmnopqrstuvwxyz"));
+  EXPECT_EQ(abc.compare(StringPiece("abcdefghijklmnopqrstuvwxyz")), 0);
+
+  EXPECT_LT(abc, StringPiece("abcdefghijklmnopqrstuvwxzz"));
+  EXPECT_LT(abc.compare(StringPiece("abcdefghijklmnopqrstuvwxzz")), 0);
+
+  EXPECT_GT(abc, StringPiece("abcdefghijklmnopqrstuvwxyy"));
+  EXPECT_GT(abc.compare(StringPiece("abcdefghijklmnopqrstuvwxyy")), 0);
+
+  // starts_with
+  EXPECT_TRUE(abc.starts_with(abc));
+  EXPECT_TRUE(abc.starts_with("abcdefghijklm"));
+  EXPECT_TRUE(!abc.starts_with("abcdefguvwxyz"));
+
+  // ends_with
+  EXPECT_TRUE(abc.ends_with(abc));
+  EXPECT_TRUE(!abc.ends_with("abcdefguvwxyz"));
+  EXPECT_TRUE(abc.ends_with("nopqrstuvwxyz"));
+}
+
+TEST(ComparisonOpsTest, StringCompareNotAmbiguous) {
+  EXPECT_EQ("hello", string("hello"));
+  EXPECT_LT("hello", string("world"));
+}
+
+TEST(ComparisonOpsTest, HeterogenousStringPieceEquals) {
+  EXPECT_EQ(StringPiece("hello"), string("hello"));
+  EXPECT_EQ("hello", StringPiece("hello"));
+}
+
+TEST(FindOneCharTest, EdgeCases) {
+  StringPiece a("xxyyyxx");
+
+  // Set a = "xyyyx".
+  a.remove_prefix(1);
+  a.remove_suffix(1);
+
+  EXPECT_EQ(0, a.find('x'));
+  EXPECT_EQ(0, a.find('x', 0));
+  EXPECT_EQ(4, a.find('x', 1));
+  EXPECT_EQ(4, a.find('x', 4));
+  EXPECT_EQ(StringPiece::npos, a.find('x', 5));
+
+  EXPECT_EQ(4, a.rfind('x'));
+  EXPECT_EQ(4, a.rfind('x', 5));
+  EXPECT_EQ(4, a.rfind('x', 4));
+  EXPECT_EQ(0, a.rfind('x', 3));
+  EXPECT_EQ(0, a.rfind('x', 0));
+
+  // Set a = "yyy".
+  a.remove_prefix(1);
+  a.remove_suffix(1);
+
+  EXPECT_EQ(StringPiece::npos, a.find('x'));
+  EXPECT_EQ(StringPiece::npos, a.rfind('x'));
+}
+
+#ifndef NDEBUG
+TEST(NonNegativeLenTest, NonNegativeLen) {
+  EXPECT_DEATH(StringPiece("xyz", -1), "len >= 0");
+}
+#endif  // ndef NDEBUG
+
+}  // namespace
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/stringprintf.cc b/src/google/protobuf/stubs/stringprintf.cc
new file mode 100644
index 0000000..3272d8e
--- /dev/null
+++ b/src/google/protobuf/stubs/stringprintf.cc
@@ -0,0 +1,175 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/base/stringprintf.cc
+
+#include <google/protobuf/stubs/stringprintf.h>
+
+#include <errno.h>
+#include <stdarg.h> // For va_list and related operations
+#include <stdio.h> // MSVC requires this for _vsnprintf
+#include <vector>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/testing/googletest.h>
+
+namespace google {
+namespace protobuf {
+
+#ifdef _MSC_VER
+enum { IS_COMPILER_MSVC = 1 };
+#ifndef va_copy
+// Define va_copy for MSVC. This is a hack, assuming va_list is simply a
+// pointer into the stack and is safe to copy.
+#define va_copy(dest, src) ((dest) = (src))
+#endif
+#else
+enum { IS_COMPILER_MSVC = 0 };
+#endif
+
+void StringAppendV(string* dst, const char* format, va_list ap) {
+  // First try with a small fixed size buffer
+  static const int kSpaceLength = 1024;
+  char space[kSpaceLength];
+
+  // It's possible for methods that use a va_list to invalidate
+  // the data in it upon use.  The fix is to make a copy
+  // of the structure before using it and use that copy instead.
+  va_list backup_ap;
+  va_copy(backup_ap, ap);
+  int result = vsnprintf(space, kSpaceLength, format, backup_ap);
+  va_end(backup_ap);
+
+  if (result < kSpaceLength) {
+    if (result >= 0) {
+      // Normal case -- everything fit.
+      dst->append(space, result);
+      return;
+    }
+
+    if (IS_COMPILER_MSVC) {
+      // Error or MSVC running out of space.  MSVC 8.0 and higher
+      // can be asked about space needed with the special idiom below:
+      va_copy(backup_ap, ap);
+      result = vsnprintf(NULL, 0, format, backup_ap);
+      va_end(backup_ap);
+    }
+
+    if (result < 0) {
+      // Just an error.
+      return;
+    }
+  }
+
+  // Increase the buffer size to the size requested by vsnprintf,
+  // plus one for the closing \0.
+  int length = result+1;
+  char* buf = new char[length];
+
+  // Restore the va_list before we use it again
+  va_copy(backup_ap, ap);
+  result = vsnprintf(buf, length, format, backup_ap);
+  va_end(backup_ap);
+
+  if (result >= 0 && result < length) {
+    // It fit
+    dst->append(buf, result);
+  }
+  delete[] buf;
+}
+
+
+string StringPrintf(const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  string result;
+  StringAppendV(&result, format, ap);
+  va_end(ap);
+  return result;
+}
+
+const string& SStringPrintf(string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  dst->clear();
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+  return *dst;
+}
+
+void StringAppendF(string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+}
+
+// Max arguments supported by StringPrintfVector
+const int kStringPrintfVectorMaxArgs = 32;
+
+// A block of zero bytes used for filler arguments.  This is const so that if
+// printf tries to write to it (via %n) then the program gets a SIGSEGV
+// and we can fix the problem or protect against an attack.
+static const char string_printf_empty_block[256] = { '\0' };
+
+string StringPrintfVector(const char* format, const vector<string>& v) {
+  GOOGLE_CHECK_LE(v.size(), kStringPrintfVectorMaxArgs)
+      << "StringPrintfVector currently only supports up to "
+      << kStringPrintfVectorMaxArgs << " arguments. "
+      << "Feel free to add support for more if you need it.";
+
+  // Add filler arguments so that bogus format+args have a harder time
+  // crashing the program, corrupting the program (%n),
+  // or displaying random chunks of memory to users.
+
+  const char* cstr[kStringPrintfVectorMaxArgs];
+  for (int i = 0; i < v.size(); ++i) {
+    cstr[i] = v[i].c_str();
+  }
+  for (int i = v.size(); i < GOOGLE_ARRAYSIZE(cstr); ++i) {
+    cstr[i] = &string_printf_empty_block[0];
+  }
+
+  // I do not know any way to pass kStringPrintfVectorMaxArgs arguments,
+  // or any way to build a va_list by hand, or any API for printf
+  // that accepts an array of arguments.  The best I can do is stick
+  // this COMPILE_ASSERT right next to the actual statement.
+
+  GOOGLE_COMPILE_ASSERT(kStringPrintfVectorMaxArgs == 32, arg_count_mismatch);
+  return StringPrintf(format,
+                      cstr[0], cstr[1], cstr[2], cstr[3], cstr[4],
+                      cstr[5], cstr[6], cstr[7], cstr[8], cstr[9],
+                      cstr[10], cstr[11], cstr[12], cstr[13], cstr[14],
+                      cstr[15], cstr[16], cstr[17], cstr[18], cstr[19],
+                      cstr[20], cstr[21], cstr[22], cstr[23], cstr[24],
+                      cstr[25], cstr[26], cstr[27], cstr[28], cstr[29],
+                      cstr[30], cstr[31]);
+}
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/stringprintf.h b/src/google/protobuf/stubs/stringprintf.h
new file mode 100644
index 0000000..ab1ab55
--- /dev/null
+++ b/src/google/protobuf/stubs/stringprintf.h
@@ -0,0 +1,76 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/base/stringprintf.h
+//
+// Printf variants that place their output in a C++ string.
+//
+// Usage:
+//      string result = StringPrintf("%d %s\n", 10, "hello");
+//      SStringPrintf(&result, "%d %s\n", 10, "hello");
+//      StringAppendF(&result, "%d %s\n", 20, "there");
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STRINGPRINTF_H
+#define GOOGLE_PROTOBUF_STUBS_STRINGPRINTF_H
+
+#include <stdarg.h>
+#include <string>
+#include <vector>
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+
+// Return a C++ string
+LIBPROTOBUF_EXPORT extern string StringPrintf(const char* format, ...);
+
+// Store result into a supplied string and return it
+LIBPROTOBUF_EXPORT extern const string& SStringPrintf(string* dst, const char* format, ...);
+
+// Append result to a supplied string
+LIBPROTOBUF_EXPORT extern void StringAppendF(string* dst, const char* format, ...);
+
+// Lower-level routine that takes a va_list and appends to a specified
+// string.  All other routines are just convenience wrappers around it.
+LIBPROTOBUF_EXPORT extern void StringAppendV(string* dst, const char* format, va_list ap);
+
+// The max arguments supported by StringPrintfVector
+LIBPROTOBUF_EXPORT extern const int kStringPrintfVectorMaxArgs;
+
+// You can use this version when all your arguments are strings, but
+// you don't know how many arguments you'll have at compile time.
+// StringPrintfVector will LOG(FATAL) if v.size() > kStringPrintfVectorMaxArgs
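+//
+// Illustrative example (not part of the original documentation): formatting
+// when the argument count is only known at runtime:
+//      vector<string> args;
+//      args.push_back("foo");
+//      args.push_back("bar");
+//      string joined = StringPrintfVector("%s/%s", args);  // "foo/bar"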
+LIBPROTOBUF_EXPORT extern string StringPrintfVector(const char* format, const vector<string>& v);
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STRINGPRINTF_H
diff --git a/src/google/protobuf/stubs/stringprintf_unittest.cc b/src/google/protobuf/stubs/stringprintf_unittest.cc
new file mode 100644
index 0000000..15895e5
--- /dev/null
+++ b/src/google/protobuf/stubs/stringprintf_unittest.cc
@@ -0,0 +1,152 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/base/stringprintf_unittest.cc
+
+#include <google/protobuf/stubs/stringprintf.h>
+
+#include <cerrno>
+#include <string>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace {
+
+TEST(StringPrintfTest, Empty) {
+#if 0
+  // gcc 2.95.3, gcc 4.1.0, and gcc 4.2.2 all warn about this:
+  // warning: zero-length printf format string.
+  // so we do not allow them in google3.
+  EXPECT_EQ("", StringPrintf(""));
+#endif
+  EXPECT_EQ("", StringPrintf("%s", string().c_str()));
+  EXPECT_EQ("", StringPrintf("%s", ""));
+}
+
+TEST(StringPrintfTest, Misc) {
+// MSVC and MinGW do not support the $ format specifier.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+  EXPECT_EQ("123hello w", StringPrintf("%3$d%2$s %1$c", 'w', "hello", 123));
+#endif  // !_MSC_VER && !__MINGW32__
+}
+
+TEST(StringAppendFTest, Empty) {
+  string value("Hello");
+  const char* empty = "";
+  StringAppendF(&value, "%s", empty);
+  EXPECT_EQ("Hello", value);
+}
+
+TEST(StringAppendFTest, EmptyString) {
+  string value("Hello");
+  StringAppendF(&value, "%s", "");
+  EXPECT_EQ("Hello", value);
+}
+
+TEST(StringAppendFTest, String) {
+  string value("Hello");
+  StringAppendF(&value, " %s", "World");
+  EXPECT_EQ("Hello World", value);
+}
+
+TEST(StringAppendFTest, Int) {
+  string value("Hello");
+  StringAppendF(&value, " %d", 123);
+  EXPECT_EQ("Hello 123", value);
+}
+
+TEST(StringPrintfTest, Multibyte) {
+  // If we are in multibyte mode and feed invalid multibyte sequence,
+  // StringPrintf should return an empty string instead of running
+  // out of memory while trying to determine destination buffer size.
+  // see b/4194543.
+
+  char* old_locale = setlocale(LC_CTYPE, NULL);
+  // Push locale with multibyte mode
+  setlocale(LC_CTYPE, "en_US.utf8");
+
+  const char kInvalidCodePoint[] = "\375\067s";
+  string value = StringPrintf("%.*s", 3, kInvalidCodePoint);
+
+  // In some versions of glibc (e.g. eglibc-2.11.1, aka GRTEv2), snprintf
+  // returns error given an invalid codepoint. Other versions
+  // (e.g. eglibc-2.15, aka pre-GRTEv3) emit the codepoint verbatim.
+  // We test that the output is one of the above.
+  EXPECT_TRUE(value.empty() || value == kInvalidCodePoint);
+
+  // Repeat with longer string, to make sure that the dynamically
+  // allocated path in StringAppendV is handled correctly.
+  int n = 2048;
+  char* buf = new char[n+1];
+  memset(buf, ' ', n-3);
+  memcpy(buf + n - 3, kInvalidCodePoint, 4);
+  value = StringPrintf("%.*s", n, buf);
+  // See GRTEv2 vs. GRTEv3 comment above.
+  EXPECT_TRUE(value.empty() || value == buf);
+  delete[] buf;
+
+  setlocale(LC_CTYPE, old_locale);
+}
+
+TEST(StringPrintfTest, NoMultibyte) {
+  // No multibyte handling, but the string contains funny chars.
+  char* old_locale = setlocale(LC_CTYPE, NULL);
+  setlocale(LC_CTYPE, "POSIX");
+  string value = StringPrintf("%.*s", 3, "\375\067s");
+  setlocale(LC_CTYPE, old_locale);
+  EXPECT_EQ("\375\067s", value);
+}
+
+TEST(StringPrintfTest, DontOverwriteErrno) {
+  // Check that errno isn't overwritten unless we're printing
+  // something significantly larger than what people are normally
+  // printing in their badly written PLOG() statements.
+  errno = ECHILD;
+  string value = StringPrintf("Hello, %s!", "World");
+  EXPECT_EQ(ECHILD, errno);
+}
+
+TEST(StringPrintfTest, LargeBuf) {
+  // Check that the large buffer is handled correctly.
+  int n = 2048;
+  char* buf = new char[n+1];
+  memset(buf, ' ', n);
+  buf[n] = 0;
+  string value = StringPrintf("%s", buf);
+  EXPECT_EQ(buf, value);
+  delete[] buf;
+}
+
+}  // anonymous namespace
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/structurally_valid.cc b/src/google/protobuf/stubs/structurally_valid.cc
new file mode 100644
index 0000000..d79a6ee
--- /dev/null
+++ b/src/google/protobuf/stubs/structurally_valid.cc
@@ -0,0 +1,588 @@
+// Copyright 2005-2008 Google Inc. All Rights Reserved.
+// Author: jrm@google.com (Jim Meehan)
+
+#include <google/protobuf/stubs/common.h>
+
+#include <google/protobuf/stubs/stringpiece.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// These four-byte entries compactly encode how many bytes 0..255 to delete
+// in making a string replacement, how many bytes to add 0..255, and the offset
+// 0..64k-1 of the replacement string in remap_string.
+struct RemapEntry {
+  uint8 delete_bytes;
+  uint8 add_bytes;
+  uint16 bytes_offset;
+};
+
+// Exit type codes for state tables. All but the first get stuffed into
+// signed one-byte entries. The first is only generated by executable code.
+// To distinguish from next-state entries, these must be contiguous and
+// all <= kExitNone
+typedef enum {
+  kExitDstSpaceFull = 239,
+  kExitIllegalStructure,  // 240
+  kExitOK,                // 241
+  kExitReject,            // ...
+  kExitReplace1,
+  kExitReplace2,
+  kExitReplace3,
+  kExitReplace21,
+  kExitReplace31,
+  kExitReplace32,
+  kExitReplaceOffset1,
+  kExitReplaceOffset2,
+  kExitReplace1S0,
+  kExitSpecial,
+  kExitDoAgain,
+  kExitRejectAlt,
+  kExitNone               // 255
+} ExitReason;
+
+
+// This struct represents one entire state table. The three initialized byte
+// areas are state_table, remap_base, and remap_string. state0 and state0_size
+// give the byte offset and length within state_table of the initial state --
+// table lookups are expected to start and end in this state, but for
+// truncated UTF-8 strings, may end in a different state. These allow a quick
+// test for that condition. entry_shift is 8 for tables subscripted by a full
+// byte value and 6 for space-optimized tables subscripted by only six
+// significant bits in UTF-8 continuation bytes.
+typedef struct {
+  const uint32 state0;
+  const uint32 state0_size;
+  const uint32 total_size;
+  const int max_expand;
+  const int entry_shift;
+  const int bytes_per_entry;
+  const uint32 losub;
+  const uint32 hiadd;
+  const uint8* state_table;
+  const RemapEntry* remap_base;
+  const uint8* remap_string;
+  const uint8* fast_state;
+} UTF8StateMachineObj;
+
+typedef UTF8StateMachineObj UTF8ScanObj;
+
+#define X__ (kExitIllegalStructure)
+#define RJ_ (kExitReject)
+#define S1_ (kExitReplace1)
+#define S2_ (kExitReplace2)
+#define S3_ (kExitReplace3)
+#define S21 (kExitReplace21)
+#define S31 (kExitReplace31)
+#define S32 (kExitReplace32)
+#define T1_ (kExitReplaceOffset1)
+#define T2_ (kExitReplaceOffset2)
+#define S11 (kExitReplace1S0)
+#define SP_ (kExitSpecial)
+#define D__ (kExitDoAgain)
+#define RJA (kExitRejectAlt)
+
+//  Entire table has 9 state blocks of 256 entries each
+static const unsigned int utf8acceptnonsurrogates_STATE0 = 0;     // state[0]
+static const unsigned int utf8acceptnonsurrogates_STATE0_SIZE = 256;  // =[1]
+static const unsigned int utf8acceptnonsurrogates_TOTAL_SIZE = 2304;
+static const unsigned int utf8acceptnonsurrogates_MAX_EXPAND_X4 = 0;
+static const unsigned int utf8acceptnonsurrogates_SHIFT = 8;
+static const unsigned int utf8acceptnonsurrogates_BYTES = 1;
+static const unsigned int utf8acceptnonsurrogates_LOSUB = 0x20202020;
+static const unsigned int utf8acceptnonsurrogates_HIADD = 0x00000000;
+
+static const uint8 utf8acceptnonsurrogates[] = {
+// state[0] 0x000000 Byte 1
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  2,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   7,   3,   3,
+  4,   5,   5,   5,   6, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[1] 0x000080 Byte 2 of 2
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+  0,   0,   0,   0,   0,   0,   0,   0,    0,   0,   0,   0,   0,   0,   0,   0,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[2] 0x000000 Byte 2 of 3
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[3] 0x001000 Byte 2 of 3
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[4] 0x000000 Byte 2 of 4
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[5] 0x040000 Byte 2 of 4
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[6] 0x100000 Byte 2 of 4
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+  3,   3,   3,   3,   3,   3,   3,   3,    3,   3,   3,   3,   3,   3,   3,   3,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[7] 0x00d000 Byte 2 of 3
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  1,   1,   1,   1,   1,   1,   1,   1,    1,   1,   1,   1,   1,   1,   1,   1,
+  8,   8,   8,   8,   8,   8,   8,   8,    8,   8,   8,   8,   8,   8,   8,   8,
+  8,   8,   8,   8,   8,   8,   8,   8,    8,   8,   8,   8,   8,   8,   8,   8,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+// state[8] 0x00d800 Byte 3 of 3
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+
+RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,  RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,
+RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,  RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,
+RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,  RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,
+RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,  RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_,
+
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+X__, X__, X__, X__, X__, X__, X__, X__,  X__, X__, X__, X__, X__, X__, X__, X__,
+};
+
+// Remap base[0] = (del, add, string_offset)
+static const RemapEntry utf8acceptnonsurrogates_remap_base[] = {
+{0, 0, 0} };
+
+// Remap string[0]
+static const unsigned char utf8acceptnonsurrogates_remap_string[] = {
+0 };
+
+static const unsigned char utf8acceptnonsurrogates_fast[256] = {
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0, 0,
+
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
+};
+
+static const UTF8ScanObj utf8acceptnonsurrogates_obj = {
+  utf8acceptnonsurrogates_STATE0,
+  utf8acceptnonsurrogates_STATE0_SIZE,
+  utf8acceptnonsurrogates_TOTAL_SIZE,
+  utf8acceptnonsurrogates_MAX_EXPAND_X4,
+  utf8acceptnonsurrogates_SHIFT,
+  utf8acceptnonsurrogates_BYTES,
+  utf8acceptnonsurrogates_LOSUB,
+  utf8acceptnonsurrogates_HIADD,
+  utf8acceptnonsurrogates,
+  utf8acceptnonsurrogates_remap_base,
+  utf8acceptnonsurrogates_remap_string,
+  utf8acceptnonsurrogates_fast
+};
+
+
+#undef X__
+#undef RJ_
+#undef S1_
+#undef S2_
+#undef S3_
+#undef S21
+#undef S31
+#undef S32
+#undef T1_
+#undef T2_
+#undef S11
+#undef SP_
+#undef D__
+#undef RJA
+
+// Return true if current Tbl pointer is within state0 range
+// Note that unsigned compare checks both ends of range simultaneously
+static inline bool InStateZero(const UTF8ScanObj* st, const uint8* Tbl) {
+  const uint8* Tbl0 = &st->state_table[st->state0];
+  return (static_cast<uint32>(Tbl - Tbl0) < st->state0_size);
+}
+
+// Scan a UTF-8 string based on a state table.
+// Always scans complete UTF-8 characters.
+// Sets the number of bytes scanned and returns the reason for exiting.
+int UTF8GenericScan(const UTF8ScanObj* st,
+                    const char * str,
+                    int str_length,
+                    int* bytes_consumed) {
+  *bytes_consumed = 0;
+  if (str_length == 0) return kExitOK;
+
+  int eshift = st->entry_shift;
+  const uint8* isrc = reinterpret_cast<const uint8*>(str);
+  const uint8* src = isrc;
+  const uint8* srclimit = isrc + str_length;
+  const uint8* srclimit8 = srclimit - 7;
+  const uint8* Tbl_0 = &st->state_table[st->state0];
+
+ DoAgain:
+  // Do state-table scan
+  int e = 0;
+  uint8 c;
+  const uint8* Tbl2 = &st->fast_state[0];
+  const uint32 losub = st->losub;
+  const uint32 hiadd = st->hiadd;
+  // Check initial few bytes one at a time until 8-byte aligned
+  //----------------------------
+  while ((((uintptr_t)src & 0x07) != 0) &&
+         (src < srclimit) &&
+         Tbl2[src[0]] == 0) {
+    src++;
+  }
+  if (((uintptr_t)src & 0x07) == 0) {
+    // Do fast for groups of 8 identity bytes.
+    // This covers a lot of 7-bit ASCII ~8x faster than the 1-byte loop,
+    // including slowing slightly on cr/lf/ht
+    //----------------------------
+    while (src < srclimit8) {
+      uint32 s0123 = (reinterpret_cast<const uint32 *>(src))[0];
+      uint32 s4567 = (reinterpret_cast<const uint32 *>(src))[1];
+      src += 8;
+      // This is a fast range check for all bytes in [losub..0x80-hiadd)
+      uint32 temp = (s0123 - losub) | (s0123 + hiadd) |
+                    (s4567 - losub) | (s4567 + hiadd);
+      if ((temp & 0x80808080) != 0) {
+        // We typically end up here on cr/lf/ht; src was incremented
+        int e0123 = (Tbl2[src[-8]] | Tbl2[src[-7]]) |
+                    (Tbl2[src[-6]] | Tbl2[src[-5]]);
+        if (e0123 != 0) {
+          src -= 8;
+          break;
+        }    // Exit on Non-interchange
+        e0123 = (Tbl2[src[-4]] | Tbl2[src[-3]]) |
+                (Tbl2[src[-2]] | Tbl2[src[-1]]);
+        if (e0123 != 0) {
+          src -= 4;
+          break;
+        }    // Exit on Non-interchange
+        // Else OK, go around again
+      }
+    }
+  }
+  //----------------------------
+
+  // Byte-at-a-time scan
+  //----------------------------
+  const uint8* Tbl = Tbl_0;
+  while (src < srclimit) {
+    c = *src;
+    e = Tbl[c];
+    src++;
+    if (e >= kExitIllegalStructure) {break;}
+    Tbl = &Tbl_0[e << eshift];
+  }
+  //----------------------------
+
+
+  // Exit possibilities:
+  //  Some exit code, !state0, back up over last char
+  //  Some exit code, state0, back up one byte exactly
+  //  source consumed, !state0, back up over partial char
+  //  source consumed, state0, exit OK
+  // For an illegal byte in state0, avoid backing up over the PREVIOUS char
+  // For a truncated last char, back up to the beginning of it
+
+  if (e >= kExitIllegalStructure) {
+    // Back up over exactly one byte of rejected/illegal UTF-8 character
+    src--;
+    // Back up more if needed
+    if (!InStateZero(st, Tbl)) {
+      do {
+        src--;
+      } while ((src > isrc) && ((src[0] & 0xc0) == 0x80));
+    }
+  } else if (!InStateZero(st, Tbl)) {
+    // Back up over truncated UTF-8 character
+    e = kExitIllegalStructure;
+    do {
+      src--;
+    } while ((src > isrc) && ((src[0] & 0xc0) == 0x80));
+  } else {
+    // Normal termination, source fully consumed
+    e = kExitOK;
+  }
+
+  if (e == kExitDoAgain) {
+    // Loop back up to the fast scan
+    goto DoAgain;
+  }
+
+  *bytes_consumed = src - isrc;
+  return e;
+}
+
+int UTF8GenericScanFastAscii(const UTF8ScanObj* st,
+                    const char * str,
+                    int str_length,
+                    int* bytes_consumed) {
+  *bytes_consumed = 0;
+  if (str_length == 0) return kExitOK;
+
+  const uint8* isrc = reinterpret_cast<const uint8*>(str);
+  const uint8* src = isrc;
+  const uint8* srclimit = isrc + str_length;
+  const uint8* srclimit8 = srclimit - 7;
+  int n;
+  int rest_consumed;
+  int exit_reason;
+  do {
+    // Check initial few bytes one at a time until 8-byte aligned
+    while ((((uintptr_t)src & 0x07) != 0) &&
+           (src < srclimit) && (src[0] < 0x80)) {
+      src++;
+    }
+    if (((uintptr_t)src & 0x07) == 0) {
+      while ((src < srclimit8) &&
+             (((reinterpret_cast<const uint32*>(src)[0] |
+                reinterpret_cast<const uint32*>(src)[1]) & 0x80808080) == 0)) {
+        src += 8;
+      }
+    }
+    while ((src < srclimit) && (src[0] < 0x80)) {
+      src++;
+    }
+    // Run state table on the rest
+    n = src - isrc;
+    exit_reason = UTF8GenericScan(st, str + n, str_length - n, &rest_consumed);
+    src += rest_consumed;
+  } while ( exit_reason == kExitDoAgain );
+
+  *bytes_consumed = src - isrc;
+  return exit_reason;
+}
+
+// Hack:  On some compilers the static tables are initialized at startup.
+//   We can't use them until they are initialized.  However, some Protocol
+//   Buffer parsing happens at static init time and may try to validate
+//   UTF-8 strings.  Since UTF-8 validation is only used for debugging
+//   anyway, we simply always return success if initialization hasn't
+//   occurred yet.
+namespace {
+
+bool module_initialized_ = false;
+
+struct InitDetector {
+  InitDetector() {
+    module_initialized_ = true;
+  }
+};
+InitDetector init_detector;
+
+}  // namespace
+
+bool IsStructurallyValidUTF8(const char* buf, int len) {
+  if (!module_initialized_) return true;
+
+  int bytes_consumed = 0;
+  UTF8GenericScanFastAscii(&utf8acceptnonsurrogates_obj,
+                           buf, len, &bytes_consumed);
+  return (bytes_consumed == len);
+}
+
+int UTF8SpnStructurallyValid(const StringPiece& str) {
+  if (!module_initialized_) return str.size();
+
+  int bytes_consumed = 0;
+  UTF8GenericScanFastAscii(&utf8acceptnonsurrogates_obj,
+                           str.data(), str.size(), &bytes_consumed);
+  return bytes_consumed;
+}
+
+// Coerce UTF-8 byte string in src_str to be
+// a structurally-valid equal-length string by selectively
+// overwriting illegal bytes with replace_char (typically blank).
+// replace_char must be legal printable 7-bit ASCII in the range 0x20..0x7e.
+// src_str is read-only. If any overwriting is needed, a modified byte string
+// is created in idst, with the same length as src_str.
+//
+// Returns pointer to output buffer, isrc if no changes were made,
+//  or idst if some bytes were changed.
+//
+// Fast case: all is structurally valid and no byte copying is done.
+//
+char* UTF8CoerceToStructurallyValid(const StringPiece& src_str,
+                                    char* idst,
+                                    const char replace_char) {
+  const char* isrc = src_str.data();
+  const int len = src_str.length();
+  int n = UTF8SpnStructurallyValid(src_str);
+  if (n == len) {               // Normal case -- all is cool, return
+    return const_cast<char*>(isrc);
+  } else {                      // Unusual case -- copy w/o bad bytes
+    const char* src = isrc;
+    const char* srclimit = isrc + len;
+    char* dst = idst;
+    memmove(dst, src, n);       // Copy initial good chunk
+    src += n;
+    dst += n;
+    while (src < srclimit) {    // src points to bogus byte or is off the end
+      dst[0] = replace_char;                    // replace one bad byte
+      src++;
+      dst++;
+      StringPiece str2(src, srclimit - src);
+      n = UTF8SpnStructurallyValid(str2);       // scan the remainder
+      memmove(dst, src, n);                     // copy next good chunk
+      src += n;
+      dst += n;
+    }
+  }
+  return idst;
+}
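+
+// Illustrative usage sketch, not part of the original source (kept inside
+// "#if 0" so it has no effect on the build): coerce a possibly-invalid byte
+// string into an equal-length, structurally valid copy.
+#if 0
+static string ExampleCoerceToValidUTF8(const string& raw) {
+  if (raw.empty()) return raw;
+  string scratch(raw.size(), '\0');  // equal-length output buffer
+  const char* out = UTF8CoerceToStructurallyValid(StringPiece(raw),
+                                                  &scratch[0], ' ');
+  // 'out' aliases raw.data() when raw was already valid, scratch otherwise.
+  return string(out, raw.size());
+}
+#endif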
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/structurally_valid_unittest.cc b/src/google/protobuf/stubs/structurally_valid_unittest.cc
new file mode 100644
index 0000000..9088888
--- /dev/null
+++ b/src/google/protobuf/stubs/structurally_valid_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+// Author: xpeng@google.com (Peter Peng)
+
+#include <google/protobuf/stubs/common.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+namespace {
+
+TEST(StructurallyValidTest, ValidUTF8String) {
+  // On GCC, this string can be written as:
+  //   "abcd 1234 - \u2014\u2013\u2212"
+  // MSVC seems to interpret \u differently.
+  string valid_str("abcd 1234 - \342\200\224\342\200\223\342\210\222 - xyz789");
+  EXPECT_TRUE(IsStructurallyValidUTF8(valid_str.data(),
+                                      valid_str.size()));
+  // Additional check for pointer alignment
+  for (int i = 1; i < 8; ++i) {
+    EXPECT_TRUE(IsStructurallyValidUTF8(valid_str.data() + i,
+                                        valid_str.size() - i));
+  }
+}
+
+TEST(StructurallyValidTest, InvalidUTF8String) {
+  const string invalid_str("abcd\xA0\xB0\xA0\xB0\xA0\xB0 - xyz789");
+  EXPECT_FALSE(IsStructurallyValidUTF8(invalid_str.data(),
+                                       invalid_str.size()));
+  // Additional check for pointer alignment
+  for (int i = 1; i < 8; ++i) {
+    EXPECT_FALSE(IsStructurallyValidUTF8(invalid_str.data() + i,
+                                         invalid_str.size() - i));
+  }
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/strutil.cc b/src/google/protobuf/stubs/strutil.cc
new file mode 100644
index 0000000..7ba92e8
--- /dev/null
+++ b/src/google/protobuf/stubs/strutil.cc
@@ -0,0 +1,2289 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/strings/strutil.cc
+
+#include <google/protobuf/stubs/strutil.h>
+#include <google/protobuf/stubs/mathlimits.h>
+
+#include <errno.h>
+#include <float.h>    // FLT_DIG and DBL_DIG
+#include <limits>
+#include <limits.h>
+#include <stdio.h>
+#include <iterator>
+
+#include <google/protobuf/stubs/stl_util.h>
+
+#ifdef _WIN32
+// MSVC has only _snprintf, not snprintf.
+//
+// MinGW has both snprintf and _snprintf, but they appear to be different
+// functions.  The former is buggy.  When invoked like so:
+//   char buffer[32];
+//   snprintf(buffer, 32, "%.*g\n", FLT_DIG, 1.23e10f);
+// it prints "1.23000e+10".  This is plainly wrong:  %g should never print
+// trailing zeros after the decimal point.  For some reason this bug only
+// occurs with some input values, not all.  In any case, _snprintf does the
+// right thing, so we use it.
+#define snprintf _snprintf
+#endif
+
+namespace google {
+namespace protobuf {
+
+// These are defined as macros on some platforms.  #undef them so that we can
+// redefine them.
+#undef isxdigit
+#undef isprint
+
+// The definitions of these in ctype.h change based on locale.  Since our
+// string manipulation is all in relation to the protocol buffer and C++
+// languages, we always want to use the C locale.  So, we re-define these
+// exactly as we want them.
+inline bool isxdigit(char c) {
+  return ('0' <= c && c <= '9') ||
+         ('a' <= c && c <= 'f') ||
+         ('A' <= c && c <= 'F');
+}
+
+inline bool isprint(char c) {
+  return c >= 0x20 && c <= 0x7E;
+}
+
+// ----------------------------------------------------------------------
+// StripString
+//    Replaces any occurrence of the character 'remove' (or the characters
+//    in 'remove') with the character 'replacewith'.
+// ----------------------------------------------------------------------
+void StripString(string* s, const char* remove, char replacewith) {
+  const char * str_start = s->c_str();
+  const char * str = str_start;
+  for (str = strpbrk(str, remove);
+       str != NULL;
+       str = strpbrk(str + 1, remove)) {
+    (*s)[str - str_start] = replacewith;
+  }
+}
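+
+// Example (illustrative; not part of the original comment):
+//   string s = "a,b;c";
+//   StripString(&s, ",;", ' ');   // s is now "a b c"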
+
+void StripWhitespace(string* str) {
+  int str_length = str->length();
+
+  // Strip off leading whitespace.
+  int first = 0;
+  while (first < str_length && ascii_isspace(str->at(first))) {
+    ++first;
+  }
+  // If entire string is white space.
+  if (first == str_length) {
+    str->clear();
+    return;
+  }
+  if (first > 0) {
+    str->erase(0, first);
+    str_length -= first;
+  }
+
+  // Strip off trailing whitespace.
+  int last = str_length - 1;
+  while (last >= 0 && ascii_isspace(str->at(last))) {
+    --last;
+  }
+  if (last != (str_length - 1) && last >= 0) {
+    str->erase(last + 1, string::npos);
+  }
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+//    Replace the "old" pattern with the "new" pattern in a string,
+//    and append the result to "res".  If replace_all is false,
+//    it only replaces the first instance of "old."
+// ----------------------------------------------------------------------
+
+void StringReplace(const string& s, const string& oldsub,
+                   const string& newsub, bool replace_all,
+                   string* res) {
+  if (oldsub.empty()) {
+    res->append(s);  // if empty, append the given string.
+    return;
+  }
+
+  string::size_type start_pos = 0;
+  string::size_type pos;
+  do {
+    pos = s.find(oldsub, start_pos);
+    if (pos == string::npos) {
+      break;
+    }
+    res->append(s, start_pos, pos - start_pos);
+    res->append(newsub);
+    start_pos = pos + oldsub.size();  // start searching again after the "old"
+  } while (replace_all);
+  res->append(s, start_pos, s.length() - start_pos);
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+//    Give me a string and two patterns "old" and "new", and I replace
+//    the first instance of "old" in the string with "new", if it
+//    exists.  If "replace_all" is true, this is repeated until no more
+//    instances of "old" remain.  RETURNS a new string, regardless of
+//    whether any replacement happened.
+// ----------------------------------------------------------------------
+
+string StringReplace(const string& s, const string& oldsub,
+                     const string& newsub, bool replace_all) {
+  string ret;
+  StringReplace(s, oldsub, newsub, replace_all, &ret);
+  return ret;
+}
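+
+// Examples (illustrative; not part of the original comment):
+//   StringReplace("aaa", "a", "bb", false)  returns "bbaa"
+//   StringReplace("aaa", "a", "bb", true)   returns "bbbbbb"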
+
+// ----------------------------------------------------------------------
+// SplitStringUsing()
+//    Split a string using a character delimiter. Append the components
+//    to 'result'.
+//
+// Note: For multi-character delimiters, this routine will split on *ANY* of
+// the characters in "delim", not on the entire string as a single delimiter.
+// ----------------------------------------------------------------------
+template <typename ITR>
+static inline
+void SplitStringToIteratorUsing(const string& full,
+                                const char* delim,
+                                ITR& result) {
+  // Optimize the common case where delim is a single character.
+  if (delim[0] != '\0' && delim[1] == '\0') {
+    char c = delim[0];
+    const char* p = full.data();
+    const char* end = p + full.size();
+    while (p != end) {
+      if (*p == c) {
+        ++p;
+      } else {
+        const char* start = p;
+        while (++p != end && *p != c);
+        *result++ = string(start, p - start);
+      }
+    }
+    return;
+  }
+
+  string::size_type begin_index, end_index;
+  begin_index = full.find_first_not_of(delim);
+  while (begin_index != string::npos) {
+    end_index = full.find_first_of(delim, begin_index);
+    if (end_index == string::npos) {
+      *result++ = full.substr(begin_index);
+      return;
+    }
+    *result++ = full.substr(begin_index, (end_index - begin_index));
+    begin_index = full.find_first_not_of(delim, end_index);
+  }
+}
+
+void SplitStringUsing(const string& full,
+                      const char* delim,
+                      vector<string>* result) {
+  back_insert_iterator< vector<string> > it(*result);
+  SplitStringToIteratorUsing(full, delim, it);
+}
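+
+// Example (illustrative; not part of the original comment). The delimiter
+// characters are treated individually and empty pieces are dropped:
+//   vector<string> v;
+//   SplitStringUsing("a,b;;c", ",;", &v);   // v == {"a", "b", "c"}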
+
+// Split a string using a character delimiter. Append the components
+// to 'result'.  If there are consecutive delimiters, this function
+// will return corresponding empty strings. The string is split into
+// at most the specified number of pieces greedily. This means that the
+// last piece may possibly be split further. To split into as many pieces
+// as possible, specify 0 as the number of pieces.
+//
+// If "full" is the empty string, yields an empty string as the only value.
+//
+// If "pieces" is negative for some reason, it returns the whole string.
+// ----------------------------------------------------------------------
+template <typename StringType, typename ITR>
+static inline
+void SplitStringToIteratorAllowEmpty(const StringType& full,
+                                     const char* delim,
+                                     int pieces,
+                                     ITR& result) {
+  string::size_type begin_index, end_index;
+  begin_index = 0;
+
+  for (int i = 0; (i < pieces-1) || (pieces == 0); i++) {
+    end_index = full.find_first_of(delim, begin_index);
+    if (end_index == string::npos) {
+      *result++ = full.substr(begin_index);
+      return;
+    }
+    *result++ = full.substr(begin_index, (end_index - begin_index));
+    begin_index = end_index + 1;
+  }
+  *result++ = full.substr(begin_index);
+}
+
+void SplitStringAllowEmpty(const string& full, const char* delim,
+                           vector<string>* result) {
+  back_insert_iterator<vector<string> > it(*result);
+  SplitStringToIteratorAllowEmpty(full, delim, 0, it);
+}
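+
+// Example (illustrative; not part of the original comment). Unlike
+// SplitStringUsing, consecutive and trailing delimiters yield empty pieces:
+//   vector<string> v;
+//   SplitStringAllowEmpty("a,,b,", ",", &v);   // v == {"a", "", "b", ""}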
+
+// ----------------------------------------------------------------------
+// JoinStrings()
+//    This merges a vector of string components with delim inserted
+//    as separators between components.
+//
+// ----------------------------------------------------------------------
+template <class ITERATOR>
+static void JoinStringsIterator(const ITERATOR& start,
+                                const ITERATOR& end,
+                                const char* delim,
+                                string* result) {
+  GOOGLE_CHECK(result != NULL);
+  result->clear();
+  int delim_length = strlen(delim);
+
+  // Precompute resulting length so we can reserve() memory in one shot.
+  int length = 0;
+  for (ITERATOR iter = start; iter != end; ++iter) {
+    if (iter != start) {
+      length += delim_length;
+    }
+    length += iter->size();
+  }
+  result->reserve(length);
+
+  // Now combine everything.
+  for (ITERATOR iter = start; iter != end; ++iter) {
+    if (iter != start) {
+      result->append(delim, delim_length);
+    }
+    result->append(iter->data(), iter->size());
+  }
+}
+
+void JoinStrings(const vector<string>& components,
+                 const char* delim,
+                 string * result) {
+  JoinStringsIterator(components.begin(), components.end(), delim, result);
+}
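+
+// Example (illustrative; not part of the original comment):
+//   vector<string> parts;
+//   parts.push_back("usr"); parts.push_back("local"); parts.push_back("bin");
+//   string path;
+//   JoinStrings(parts, "/", &path);   // path == "usr/local/bin"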
+
+// ----------------------------------------------------------------------
+// UnescapeCEscapeSequences()
+//    This does all the unescaping that C does: \ooo, \r, \n, etc
+//    Returns length of resulting string.
+//    The implementation of \x parses any positive number of hex digits,
+//    but it is an error if the value requires more than 8 bits, and the
+//    result is truncated to 8 bits.
+//
+//    The second call stores its errors in a supplied string vector.
+//    If the string vector pointer is NULL, it reports the errors with LOG().
+// ----------------------------------------------------------------------
+
+#define IS_OCTAL_DIGIT(c) (((c) >= '0') && ((c) <= '7'))
+
+// Protocol buffers doesn't ever care about errors, but I don't want to remove
+// the code.
+#define LOG_STRING(LEVEL, VECTOR) GOOGLE_LOG_IF(LEVEL, false)
+
+int UnescapeCEscapeSequences(const char* source, char* dest) {
+  return UnescapeCEscapeSequences(source, dest, NULL);
+}
+
+int UnescapeCEscapeSequences(const char* source, char* dest,
+                             vector<string> *errors) {
+  GOOGLE_DCHECK(errors == NULL) << "Error reporting not implemented.";
+
+  char* d = dest;
+  const char* p = source;
+
+  // Small optimization for case where source = dest and there's no escaping
+  while ( p == d && *p != '\0' && *p != '\\' )
+    p++, d++;
+
+  while (*p != '\0') {
+    if (*p != '\\') {
+      *d++ = *p++;
+    } else {
+      switch ( *++p ) {                    // skip past the '\\'
+        case '\0':
+          LOG_STRING(ERROR, errors) << "String cannot end with \\";
+          *d = '\0';
+          return d - dest;   // we're done with p
+        case 'a':  *d++ = '\a';  break;
+        case 'b':  *d++ = '\b';  break;
+        case 'f':  *d++ = '\f';  break;
+        case 'n':  *d++ = '\n';  break;
+        case 'r':  *d++ = '\r';  break;
+        case 't':  *d++ = '\t';  break;
+        case 'v':  *d++ = '\v';  break;
+        case '\\': *d++ = '\\';  break;
+        case '?':  *d++ = '\?';  break;    // \?  Who knew?
+        case '\'': *d++ = '\'';  break;
+        case '"':  *d++ = '\"';  break;
+        case '0': case '1': case '2': case '3':  // octal digit: 1 to 3 digits
+        case '4': case '5': case '6': case '7': {
+          char ch = *p - '0';
+          if ( IS_OCTAL_DIGIT(p[1]) )
+            ch = ch * 8 + *++p - '0';
+          if ( IS_OCTAL_DIGIT(p[1]) )      // safe (and easy) to do this twice
+            ch = ch * 8 + *++p - '0';      // now points at last digit
+          *d++ = ch;
+          break;
+        }
+        case 'x': case 'X': {
+          if (!isxdigit(p[1])) {
+            if (p[1] == '\0') {
+              LOG_STRING(ERROR, errors) << "String cannot end with \\x";
+            } else {
+              LOG_STRING(ERROR, errors) <<
+                "\\x cannot be followed by non-hex digit: \\" << *p << p[1];
+            }
+            break;
+          }
+          unsigned int ch = 0;
+          const char *hex_start = p;
+          while (isxdigit(p[1]))  // arbitrarily many hex digits
+            ch = (ch << 4) + hex_digit_to_int(*++p);
+          if (ch > 0xFF)
+            LOG_STRING(ERROR, errors) << "Value of " <<
+              "\\" << string(hex_start, p+1-hex_start) << " exceeds 8 bits";
+          *d++ = ch;
+          break;
+        }
+#if 0  // TODO(kenton):  Support \u and \U?  Requires runetochar().
+        case 'u': {
+          // \uhhhh => convert 4 hex digits to UTF-8
+          char32 rune = 0;
+          const char *hex_start = p;
+          for (int i = 0; i < 4; ++i) {
+            if (isxdigit(p[1])) {  // Look one char ahead.
+              rune = (rune << 4) + hex_digit_to_int(*++p);  // Advance p.
+            } else {
+              LOG_STRING(ERROR, errors)
+                << "\\u must be followed by 4 hex digits: \\"
+                <<  string(hex_start, p+1-hex_start);
+              break;
+            }
+          }
+          d += runetochar(d, &rune);
+          break;
+        }
+        case 'U': {
+          // \Uhhhhhhhh => convert 8 hex digits to UTF-8
+          char32 rune = 0;
+          const char *hex_start = p;
+          for (int i = 0; i < 8; ++i) {
+            if (isxdigit(p[1])) {  // Look one char ahead.
+              // Don't change rune until we're sure this
+              // is within the Unicode limit, but do advance p.
+              char32 newrune = (rune << 4) + hex_digit_to_int(*++p);
+              if (newrune > 0x10FFFF) {
+                LOG_STRING(ERROR, errors)
+                  << "Value of \\"
+                  << string(hex_start, p + 1 - hex_start)
+                  << " exceeds Unicode limit (0x10FFFF)";
+                break;
+              } else {
+                rune = newrune;
+              }
+            } else {
+              LOG_STRING(ERROR, errors)
+                << "\\U must be followed by 8 hex digits: \\"
+                <<  string(hex_start, p+1-hex_start);
+              break;
+            }
+          }
+          d += runetochar(d, &rune);
+          break;
+        }
+#endif
+        default:
+          LOG_STRING(ERROR, errors) << "Unknown escape sequence: \\" << *p;
+      }
+      p++;                                 // read past letter we escaped
+    }
+  }
+  *d = '\0';
+  return d - dest;
+}
+
+// ----------------------------------------------------------------------
+// UnescapeCEscapeString()
+//    This does the same thing as UnescapeCEscapeSequences, but creates
+//    a new string. The caller does not need to worry about allocating
+//    a dest buffer. This should be used for non-performance-critical
+//    tasks such as printing debug messages. It is safe for src and dest
+//    to be the same.
+//
+//    The second call stores its errors in a supplied string vector.
+//    If the string vector pointer is NULL, it reports the errors with LOG().
+//
+//    In the first and second calls, the length of dest is returned. In the
+//    third call, the new string is returned.
+// ----------------------------------------------------------------------
+int UnescapeCEscapeString(const string& src, string* dest) {
+  return UnescapeCEscapeString(src, dest, NULL);
+}
+
+int UnescapeCEscapeString(const string& src, string* dest,
+                          vector<string> *errors) {
+  scoped_array<char> unescaped(new char[src.size() + 1]);
+  int len = UnescapeCEscapeSequences(src.c_str(), unescaped.get(), errors);
+  GOOGLE_CHECK(dest);
+  dest->assign(unescaped.get(), len);
+  return len;
+}
+
+string UnescapeCEscapeString(const string& src) {
+  scoped_array<char> unescaped(new char[src.size() + 1]);
+  int len = UnescapeCEscapeSequences(src.c_str(), unescaped.get(), NULL);
+  return string(unescaped.get(), len);
+}
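+
+// Example usage of the three overloads above (an illustrative sketch; the
+// results follow from the escape handling in UnescapeCEscapeSequences):
+//
+//   string out;
+//   int n = UnescapeCEscapeString("a\\tb\\x41", &out);  // out == "a\tbA", n == 4
+//   string s = UnescapeCEscapeString("\\101\\102");     // s == "AB"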
+
+// ----------------------------------------------------------------------
+// CEscapeString()
+// CHexEscapeString()
+//    Copies 'src' to 'dest', escaping dangerous characters using
+//    C-style escape sequences. This is very useful for preparing query
+//    flags. 'src' and 'dest' should not overlap. The 'Hex' version uses
+//    hexadecimal rather than octal sequences.
+//    Returns the number of bytes written to 'dest' (not including the \0)
+//    or -1 if there was insufficient space.
+//
+//    Currently only \n, \r, \t, ", ', \ and !isprint() chars are escaped.
+// ----------------------------------------------------------------------
+int CEscapeInternal(const char* src, int src_len, char* dest,
+                    int dest_len, bool use_hex, bool utf8_safe) {
+  const char* src_end = src + src_len;
+  int used = 0;
+  bool last_hex_escape = false; // true if last output char was \xNN
+
+  for (; src < src_end; src++) {
+    if (dest_len - used < 2)   // Need space for two letter escape
+      return -1;
+
+    bool is_hex_escape = false;
+    switch (*src) {
+      case '\n': dest[used++] = '\\'; dest[used++] = 'n';  break;
+      case '\r': dest[used++] = '\\'; dest[used++] = 'r';  break;
+      case '\t': dest[used++] = '\\'; dest[used++] = 't';  break;
+      case '\"': dest[used++] = '\\'; dest[used++] = '\"'; break;
+      case '\'': dest[used++] = '\\'; dest[used++] = '\''; break;
+      case '\\': dest[used++] = '\\'; dest[used++] = '\\'; break;
+      default:
+        // Note that if we emit \xNN and the src character after that is a hex
+        // digit then that digit must be escaped too to prevent it being
+        // interpreted as part of the character code by C.
+        if ((!utf8_safe || static_cast<uint8>(*src) < 0x80) &&
+            (!isprint(*src) ||
+             (last_hex_escape && isxdigit(*src)))) {
+          if (dest_len - used < 4) // need space for 4 letter escape
+            return -1;
+          sprintf(dest + used, (use_hex ? "\\x%02x" : "\\%03o"),
+                  static_cast<uint8>(*src));
+          is_hex_escape = use_hex;
+          used += 4;
+        } else {
+          dest[used++] = *src; break;
+        }
+    }
+    last_hex_escape = is_hex_escape;
+  }
+
+  if (dest_len - used < 1)   // make sure that there is room for \0
+    return -1;
+
+  dest[used] = '\0';   // doesn't count towards return value though
+  return used;
+}
+
+// Calculates the length of the C-style escaped version of 'src'.
+// Assumes that non-printable characters are escaped using octal sequences, and
+// that UTF-8 bytes are not handled specially.
+static inline size_t CEscapedLength(StringPiece src) {
+  static char c_escaped_len[256] = {
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 2, 4, 4,  // \t, \n, \r
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,  // ", '
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  // '0'..'9'
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  // 'A'..'O'
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,  // 'P'..'Z', '\'
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  // 'a'..'o'
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4,  // 'p'..'z', DEL
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+  };
+
+  size_t escaped_len = 0;
+  for (int i = 0; i < src.size(); ++i) {
+    unsigned char c = static_cast<unsigned char>(src[i]);
+    escaped_len += c_escaped_len[c];
+  }
+  return escaped_len;
+}
+
+// ----------------------------------------------------------------------
+// Escapes 'src' using C-style escape sequences, and appends the escaped string
+// to 'dest'. This version is faster than calling CEscapeInternal as it computes
+// the required space using a lookup table, and also does not do any special
+// handling for Hex or UTF-8 characters.
+// ----------------------------------------------------------------------
+void CEscapeAndAppend(StringPiece src, string* dest) {
+  size_t escaped_len = CEscapedLength(src);
+  if (escaped_len == src.size()) {
+    dest->append(src.data(), src.size());
+    return;
+  }
+
+  size_t cur_dest_len = dest->size();
+  dest->resize(cur_dest_len + escaped_len);
+  char* append_ptr = &(*dest)[cur_dest_len];
+
+  for (int i = 0; i < src.size(); ++i) {
+    unsigned char c = static_cast<unsigned char>(src[i]);
+    switch (c) {
+      case '\n': *append_ptr++ = '\\'; *append_ptr++ = 'n'; break;
+      case '\r': *append_ptr++ = '\\'; *append_ptr++ = 'r'; break;
+      case '\t': *append_ptr++ = '\\'; *append_ptr++ = 't'; break;
+      case '\"': *append_ptr++ = '\\'; *append_ptr++ = '\"'; break;
+      case '\'': *append_ptr++ = '\\'; *append_ptr++ = '\''; break;
+      case '\\': *append_ptr++ = '\\'; *append_ptr++ = '\\'; break;
+      default:
+        if (!isprint(c)) {
+          *append_ptr++ = '\\';
+          *append_ptr++ = '0' + c / 64;
+          *append_ptr++ = '0' + (c % 64) / 8;
+          *append_ptr++ = '0' + c % 8;
+        } else {
+          *append_ptr++ = c;
+        }
+        break;
+    }
+  }
+}
+
+string CEscape(const string& src) {
+  string dest;
+  CEscapeAndAppend(src, &dest);
+  return dest;
+}
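+
+// Illustrative round trip through the escaping rules above: '\t' becomes the
+// two characters "\\t" and the non-printable byte 0x1f becomes 3-digit octal.
+//
+//   string escaped = CEscape(string("a\tb\x1fc"));   // escaped == "a\\tb\\037c"
+//   string raw = UnescapeCEscapeString(escaped);     // raw == "a\tb\x1fc" again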
+
+namespace strings {
+
+string Utf8SafeCEscape(const string& src) {
+  const int dest_length = src.size() * 4 + 1; // Maximum possible expansion
+  scoped_array<char> dest(new char[dest_length]);
+  const int len = CEscapeInternal(src.data(), src.size(),
+                                  dest.get(), dest_length, false, true);
+  GOOGLE_DCHECK_GE(len, 0);
+  return string(dest.get(), len);
+}
+
+string CHexEscape(const string& src) {
+  const int dest_length = src.size() * 4 + 1; // Maximum possible expansion
+  scoped_array<char> dest(new char[dest_length]);
+  const int len = CEscapeInternal(src.data(), src.size(),
+                                  dest.get(), dest_length, true, false);
+  GOOGLE_DCHECK_GE(len, 0);
+  return string(dest.get(), len);
+}
+
+}  // namespace strings
+
+// ----------------------------------------------------------------------
+// strto32_adaptor()
+// strtou32_adaptor()
+//    Implementation of strto[u]l replacements that have identical
+//    overflow and underflow characteristics for both ILP-32 and LP-64
+//    platforms, including errno preservation in error-free calls.
+// ----------------------------------------------------------------------
+
+int32 strto32_adaptor(const char *nptr, char **endptr, int base) {
+  const int saved_errno = errno;
+  errno = 0;
+  const long result = strtol(nptr, endptr, base);
+  if (errno == ERANGE && result == LONG_MIN) {
+    return kint32min;
+  } else if (errno == ERANGE && result == LONG_MAX) {
+    return kint32max;
+  } else if (errno == 0 && result < kint32min) {
+    errno = ERANGE;
+    return kint32min;
+  } else if (errno == 0 && result > kint32max) {
+    errno = ERANGE;
+    return kint32max;
+  }
+  if (errno == 0)
+    errno = saved_errno;
+  return static_cast<int32>(result);
+}
+
+uint32 strtou32_adaptor(const char *nptr, char **endptr, int base) {
+  const int saved_errno = errno;
+  errno = 0;
+  const unsigned long result = strtoul(nptr, endptr, base);
+  if (errno == ERANGE && result == ULONG_MAX) {
+    return kuint32max;
+  } else if (errno == 0 && result > kuint32max) {
+    errno = ERANGE;
+    return kuint32max;
+  }
+  if (errno == 0)
+    errno = saved_errno;
+  return static_cast<uint32>(result);
+}
+
+inline bool safe_parse_sign(string* text  /*inout*/,
+                            bool* negative_ptr  /*output*/) {
+  const char* start = text->data();
+  const char* end = start + text->size();
+
+  // Consume whitespace.
+  while (start < end && (start[0] == ' ')) {
+    ++start;
+  }
+  while (start < end && (end[-1] == ' ')) {
+    --end;
+  }
+  if (start >= end) {
+    return false;
+  }
+
+  // Consume sign.
+  *negative_ptr = (start[0] == '-');
+  if (*negative_ptr || start[0] == '+') {
+    ++start;
+    if (start >= end) {
+      return false;
+    }
+  }
+  *text = text->substr(start - text->data(), end - start);
+  return true;
+}
+
+template<typename IntType>
+bool safe_parse_positive_int(
+    string text, IntType* value_p) {
+  int base = 10;
+  IntType value = 0;
+  const IntType vmax = std::numeric_limits<IntType>::max();
+  assert(vmax > 0);
+  assert(vmax >= base);
+  const IntType vmax_over_base = vmax / base;
+  const char* start = text.data();
+  const char* end = start + text.size();
+  // loop over digits
+  for (; start < end; ++start) {
+    unsigned char c = static_cast<unsigned char>(start[0]);
+    int digit = c - '0';
+    if (digit >= base || digit < 0) {
+      *value_p = value;
+      return false;
+    }
+    if (value > vmax_over_base) {
+      *value_p = vmax;
+      return false;
+    }
+    value *= base;
+    if (value > vmax - digit) {
+      *value_p = vmax;
+      return false;
+    }
+    value += digit;
+  }
+  *value_p = value;
+  return true;
+}
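+
+// Worked example of the two overflow guards above (illustrative), using
+// IntType = uint32, so vmax == 4294967295 and vmax_over_base == 429496729:
+//
+//   uint32 v;
+//   safe_parse_positive_int("4294967295", &v);  // true,  v == 4294967295
+//   safe_parse_positive_int("4294967296", &v);  // false, v clamped to vmax;
+//       // after "429496729" we have value == vmax_over_base, so the multiply
+//       // is still allowed, but value * 10 == 4294967290 > vmax - 6, so the
+//       // second guard trips on the final digit.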
+
+template<typename IntType>
+bool safe_parse_negative_int(
+    const string& text, IntType* value_p) {
+  int base = 10;
+  IntType value = 0;
+  const IntType vmin = std::numeric_limits<IntType>::min();
+  assert(vmin < 0);
+  assert(vmin <= 0 - base);
+  IntType vmin_over_base = vmin / base;
+  // The 2003 C++ standard [expr.mul] says
+  //   "... the sign of the remainder is implementation-defined."
+  // However, (vmin/base)*base + vmin%base always equals vmin, so checking the
+  // sign of vmin % base below is enough to correct for either rounding
+  // direction.  The 2011 C++ standard tightens the spec (division truncates
+  // toward zero), but we cannot rely on C++11 here.
+  if (vmin % base > 0) {
+    vmin_over_base += 1;
+  }
+  const char* start = text.data();
+  const char* end = start + text.size();
+  // loop over digits
+  for (; start < end; ++start) {
+    unsigned char c = static_cast<unsigned char>(start[0]);
+    int digit = c - '0';
+    if (digit >= base || digit < 0) {
+      *value_p = value;
+      return false;
+    }
+    if (value < vmin_over_base) {
+      *value_p = vmin;
+      return false;
+    }
+    value *= base;
+    if (value < vmin + digit) {
+      *value_p = vmin;
+      return false;
+    }
+    value -= digit;
+  }
+  *value_p = value;
+  return true;
+}
+
+template<typename IntType>
+bool safe_int_internal(string text, IntType* value_p) {
+  *value_p = 0;
+  bool negative;
+  if (!safe_parse_sign(&text, &negative)) {
+    return false;
+  }
+  if (!negative) {
+    return safe_parse_positive_int(text, value_p);
+  } else {
+    return safe_parse_negative_int(text, value_p);
+  }
+}
+
+template<typename IntType>
+bool safe_uint_internal(string text, IntType* value_p) {
+  *value_p = 0;
+  bool negative;
+  if (!safe_parse_sign(&text, &negative) || negative) {
+    return false;
+  }
+  return safe_parse_positive_int(text, value_p);
+}
+
+// ----------------------------------------------------------------------
+// FastIntToBuffer()
+// FastInt64ToBuffer()
+// FastHexToBuffer()
+// FastHex64ToBuffer()
+// FastHex32ToBuffer()
+// ----------------------------------------------------------------------
+
+// Offset into buffer where FastInt64ToBuffer places the end of string
+// null character.  Also used by FastInt64ToBufferLeft.
+static const int kFastInt64ToBufferOffset = 21;
+
+char *FastInt64ToBuffer(int64 i, char* buffer) {
+  // We could collapse the positive and negative sections, but that
+  // would be slightly slower for positive numbers...
+  // 22 bytes is enough to store -2**64, -18446744073709551616.
+  char* p = buffer + kFastInt64ToBufferOffset;
+  *p-- = '\0';
+  if (i >= 0) {
+    do {
+      *p-- = '0' + i % 10;
+      i /= 10;
+    } while (i > 0);
+    return p + 1;
+  } else {
+    // On different platforms, % and / have different behaviors for
+    // negative numbers, so we need to jump through hoops to make sure
+    // we don't divide negative numbers.
+    if (i > -10) {
+      i = -i;
+      *p-- = '0' + i;
+      *p = '-';
+      return p;
+    } else {
+      // Make sure we aren't at MIN_INT, in which case we can't say i = -i
+      i = i + 10;
+      i = -i;
+      *p-- = '0' + i % 10;
+      // Undo what we did a moment ago
+      i = i / 10 + 1;
+      do {
+        *p-- = '0' + i % 10;
+        i /= 10;
+      } while (i > 0);
+      *p = '-';
+      return p;
+    }
+  }
+}
+
+// Offset into buffer where FastInt32ToBuffer places the end of string
+// null character.  Also used by FastInt32ToBufferLeft
+static const int kFastInt32ToBufferOffset = 11;
+
+// Yes, this is a duplicate of FastInt64ToBuffer.  But, we need this for the
+// compiler to generate 32 bit arithmetic instructions.  It's much faster, at
+// least with 32 bit binaries.
+char *FastInt32ToBuffer(int32 i, char* buffer) {
+  // We could collapse the positive and negative sections, but that
+  // would be slightly slower for positive numbers...
+  // 12 bytes is enough to store -2**32, -4294967296.
+  char* p = buffer + kFastInt32ToBufferOffset;
+  *p-- = '\0';
+  if (i >= 0) {
+    do {
+      *p-- = '0' + i % 10;
+      i /= 10;
+    } while (i > 0);
+    return p + 1;
+  } else {
+    // On different platforms, % and / have different behaviors for
+    // negative numbers, so we need to jump through hoops to make sure
+    // we don't divide negative numbers.
+    if (i > -10) {
+      i = -i;
+      *p-- = '0' + i;
+      *p = '-';
+      return p;
+    } else {
+      // Make sure we aren't at MIN_INT, in which case we can't say i = -i
+      i = i + 10;
+      i = -i;
+      *p-- = '0' + i % 10;
+      // Undo what we did a moment ago
+      i = i / 10 + 1;
+      do {
+        *p-- = '0' + i % 10;
+        i /= 10;
+      } while (i > 0);
+      *p = '-';
+      return p;
+    }
+  }
+}
+
+char *FastHexToBuffer(int i, char* buffer) {
+  GOOGLE_CHECK(i >= 0) << "FastHexToBuffer() wants non-negative integers, not " << i;
+
+  static const char *hexdigits = "0123456789abcdef";
+  char *p = buffer + 21;
+  *p-- = '\0';
+  do {
+    *p-- = hexdigits[i & 15];   // mod by 16
+    i >>= 4;                    // divide by 16
+  } while (i > 0);
+  return p + 1;
+}
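+
+// Note that FastHexToBuffer, like the two functions above, fills the buffer
+// from the end (the '\0' goes at buffer + 21) and returns a pointer into the
+// buffer rather than the buffer itself.  Illustrative use:
+//
+//   char buf[kFastToBufferSize];
+//   const char* hex = FastHexToBuffer(255, buf);   // hex points at "ff"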
+
+char *InternalFastHexToBuffer(uint64 value, char* buffer, int num_byte) {
+  static const char *hexdigits = "0123456789abcdef";
+  buffer[num_byte] = '\0';
+  for (int i = num_byte - 1; i >= 0; i--) {
+#ifdef _M_X64
+    // MSVC x64 platform has a bug optimizing the uint32(value) in the #else
+    // block. Given that the uint32 cast was to improve performance on 32-bit
+    // platforms, we use 64-bit '&' directly.
+    buffer[i] = hexdigits[value & 0xf];
+#else
+    buffer[i] = hexdigits[uint32(value) & 0xf];
+#endif
+    value >>= 4;
+  }
+  return buffer;
+}
+
+char *FastHex64ToBuffer(uint64 value, char* buffer) {
+  return InternalFastHexToBuffer(value, buffer, 16);
+}
+
+char *FastHex32ToBuffer(uint32 value, char* buffer) {
+  return InternalFastHexToBuffer(value, buffer, 8);
+}
+
+// ----------------------------------------------------------------------
+// FastInt32ToBufferLeft()
+// FastUInt32ToBufferLeft()
+// FastInt64ToBufferLeft()
+// FastUInt64ToBufferLeft()
+//
+// Like the Fast*ToBuffer() functions above, these are intended for speed.
+// Unlike the Fast*ToBuffer() functions, however, these functions write
+// their output to the beginning of the buffer (hence the name, as the
+// output is left-aligned).  The caller is responsible for ensuring that
+// the buffer has enough space to hold the output.
+//
+// Returns a pointer to the end of the string (i.e. the null character
+// terminating the string).
+// ----------------------------------------------------------------------
+
+static const char two_ASCII_digits[100][2] = {
+  {'0','0'}, {'0','1'}, {'0','2'}, {'0','3'}, {'0','4'},
+  {'0','5'}, {'0','6'}, {'0','7'}, {'0','8'}, {'0','9'},
+  {'1','0'}, {'1','1'}, {'1','2'}, {'1','3'}, {'1','4'},
+  {'1','5'}, {'1','6'}, {'1','7'}, {'1','8'}, {'1','9'},
+  {'2','0'}, {'2','1'}, {'2','2'}, {'2','3'}, {'2','4'},
+  {'2','5'}, {'2','6'}, {'2','7'}, {'2','8'}, {'2','9'},
+  {'3','0'}, {'3','1'}, {'3','2'}, {'3','3'}, {'3','4'},
+  {'3','5'}, {'3','6'}, {'3','7'}, {'3','8'}, {'3','9'},
+  {'4','0'}, {'4','1'}, {'4','2'}, {'4','3'}, {'4','4'},
+  {'4','5'}, {'4','6'}, {'4','7'}, {'4','8'}, {'4','9'},
+  {'5','0'}, {'5','1'}, {'5','2'}, {'5','3'}, {'5','4'},
+  {'5','5'}, {'5','6'}, {'5','7'}, {'5','8'}, {'5','9'},
+  {'6','0'}, {'6','1'}, {'6','2'}, {'6','3'}, {'6','4'},
+  {'6','5'}, {'6','6'}, {'6','7'}, {'6','8'}, {'6','9'},
+  {'7','0'}, {'7','1'}, {'7','2'}, {'7','3'}, {'7','4'},
+  {'7','5'}, {'7','6'}, {'7','7'}, {'7','8'}, {'7','9'},
+  {'8','0'}, {'8','1'}, {'8','2'}, {'8','3'}, {'8','4'},
+  {'8','5'}, {'8','6'}, {'8','7'}, {'8','8'}, {'8','9'},
+  {'9','0'}, {'9','1'}, {'9','2'}, {'9','3'}, {'9','4'},
+  {'9','5'}, {'9','6'}, {'9','7'}, {'9','8'}, {'9','9'}
+};
+
+char* FastUInt32ToBufferLeft(uint32 u, char* buffer) {
+  int digits;
+  const char *ASCII_digits = NULL;
+  // The idea of this implementation is to trim the number of divides to as few
+  // as possible by using multiplication and subtraction rather than mod (%),
+  // and by outputting two digits at a time rather than one.
+  // The huge-number case is first, in the hopes that the compiler will output
+  // that case in one branch-free block of code, and only output conditional
+  // branches into it from below.
+  if (u >= 1000000000) {  // >= 1,000,000,000
+    digits = u / 100000000;  // 100,000,000
+    ASCII_digits = two_ASCII_digits[digits];
+    buffer[0] = ASCII_digits[0];
+    buffer[1] = ASCII_digits[1];
+    buffer += 2;
+sublt100_000_000:
+    u -= digits * 100000000;  // 100,000,000
+lt100_000_000:
+    digits = u / 1000000;  // 1,000,000
+    ASCII_digits = two_ASCII_digits[digits];
+    buffer[0] = ASCII_digits[0];
+    buffer[1] = ASCII_digits[1];
+    buffer += 2;
+sublt1_000_000:
+    u -= digits * 1000000;  // 1,000,000
+lt1_000_000:
+    digits = u / 10000;  // 10,000
+    ASCII_digits = two_ASCII_digits[digits];
+    buffer[0] = ASCII_digits[0];
+    buffer[1] = ASCII_digits[1];
+    buffer += 2;
+sublt10_000:
+    u -= digits * 10000;  // 10,000
+lt10_000:
+    digits = u / 100;
+    ASCII_digits = two_ASCII_digits[digits];
+    buffer[0] = ASCII_digits[0];
+    buffer[1] = ASCII_digits[1];
+    buffer += 2;
+sublt100:
+    u -= digits * 100;
+lt100:
+    digits = u;
+    ASCII_digits = two_ASCII_digits[digits];
+    buffer[0] = ASCII_digits[0];
+    buffer[1] = ASCII_digits[1];
+    buffer += 2;
+done:
+    *buffer = 0;
+    return buffer;
+  }
+
+  if (u < 100) {
+    digits = u;
+    if (u >= 10) goto lt100;
+    *buffer++ = '0' + digits;
+    goto done;
+  }
+  if (u  <  10000) {   // 10,000
+    if (u >= 1000) goto lt10_000;
+    digits = u / 100;
+    *buffer++ = '0' + digits;
+    goto sublt100;
+  }
+  if (u  <  1000000) {   // 1,000,000
+    if (u >= 100000) goto lt1_000_000;
+    digits = u / 10000;  //    10,000
+    *buffer++ = '0' + digits;
+    goto sublt10_000;
+  }
+  if (u  <  100000000) {   // 100,000,000
+    if (u >= 10000000) goto lt100_000_000;
+    digits = u / 1000000;  //   1,000,000
+    *buffer++ = '0' + digits;
+    goto sublt1_000_000;
+  }
+  // we already know that u < 1,000,000,000
+  digits = u / 100000000;   // 100,000,000
+  *buffer++ = '0' + digits;
+  goto sublt100_000_000;
+}
+
+char* FastInt32ToBufferLeft(int32 i, char* buffer) {
+  uint32 u = i;
+  if (i < 0) {
+    *buffer++ = '-';
+    // Negate in unsigned arithmetic so that i == kint32min does not overflow.
+    u = 0 - u;
+  }
+  return FastUInt32ToBufferLeft(u, buffer);
+}
+
+char* FastUInt64ToBufferLeft(uint64 u64, char* buffer) {
+  int digits;
+  const char *ASCII_digits = NULL;
+
+  uint32 u = static_cast<uint32>(u64);
+  if (u == u64) return FastUInt32ToBufferLeft(u, buffer);
+
+  uint64 top_11_digits = u64 / 1000000000;
+  buffer = FastUInt64ToBufferLeft(top_11_digits, buffer);
+  u = u64 - (top_11_digits * 1000000000);
+
+  digits = u / 10000000;  // 10,000,000
+  GOOGLE_DCHECK_LT(digits, 100);
+  ASCII_digits = two_ASCII_digits[digits];
+  buffer[0] = ASCII_digits[0];
+  buffer[1] = ASCII_digits[1];
+  buffer += 2;
+  u -= digits * 10000000;  // 10,000,000
+  digits = u / 100000;  // 100,000
+  ASCII_digits = two_ASCII_digits[digits];
+  buffer[0] = ASCII_digits[0];
+  buffer[1] = ASCII_digits[1];
+  buffer += 2;
+  u -= digits * 100000;  // 100,000
+  digits = u / 1000;  // 1,000
+  ASCII_digits = two_ASCII_digits[digits];
+  buffer[0] = ASCII_digits[0];
+  buffer[1] = ASCII_digits[1];
+  buffer += 2;
+  u -= digits * 1000;  // 1,000
+  digits = u / 10;
+  ASCII_digits = two_ASCII_digits[digits];
+  buffer[0] = ASCII_digits[0];
+  buffer[1] = ASCII_digits[1];
+  buffer += 2;
+  u -= digits * 10;
+  digits = u;
+  *buffer++ = '0' + digits;
+  *buffer = 0;
+  return buffer;
+}
+
+char* FastInt64ToBufferLeft(int64 i, char* buffer) {
+  uint64 u = i;
+  if (i < 0) {
+    *buffer++ = '-';
+    // Negate in unsigned arithmetic so that i == kint64min does not overflow.
+    u = 0 - u;
+  }
+  return FastUInt64ToBufferLeft(u, buffer);
+}
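+
+// Illustrative use of the left-aligned variants: per the comment block above,
+// the returned pointer is the terminating '\0', so the length falls out of
+// pointer arithmetic.
+//
+//   char buf[kFastToBufferSize];
+//   char* end = FastInt32ToBufferLeft(-1234, buf);  // buf holds "-1234"
+//   int len = end - buf;                            // len == 5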
+
+// ----------------------------------------------------------------------
+// SimpleItoa()
+//    Description: converts an integer to a string.
+//
+//    Return value: string
+// ----------------------------------------------------------------------
+
+string SimpleItoa(int i) {
+  char buffer[kFastToBufferSize];
+  return (sizeof(i) == 4) ?
+    FastInt32ToBuffer(i, buffer) :
+    FastInt64ToBuffer(i, buffer);
+}
+
+string SimpleItoa(unsigned int i) {
+  char buffer[kFastToBufferSize];
+  return string(buffer, (sizeof(i) == 4) ?
+    FastUInt32ToBufferLeft(i, buffer) :
+    FastUInt64ToBufferLeft(i, buffer));
+}
+
+string SimpleItoa(long i) {
+  char buffer[kFastToBufferSize];
+  return (sizeof(i) == 4) ?
+    FastInt32ToBuffer(i, buffer) :
+    FastInt64ToBuffer(i, buffer);
+}
+
+string SimpleItoa(unsigned long i) {
+  char buffer[kFastToBufferSize];
+  return string(buffer, (sizeof(i) == 4) ?
+    FastUInt32ToBufferLeft(i, buffer) :
+    FastUInt64ToBufferLeft(i, buffer));
+}
+
+string SimpleItoa(long long i) {
+  char buffer[kFastToBufferSize];
+  return (sizeof(i) == 4) ?
+    FastInt32ToBuffer(i, buffer) :
+    FastInt64ToBuffer(i, buffer);
+}
+
+string SimpleItoa(unsigned long long i) {
+  char buffer[kFastToBufferSize];
+  return string(buffer, (sizeof(i) == 4) ?
+    FastUInt32ToBufferLeft(i, buffer) :
+    FastUInt64ToBufferLeft(i, buffer));
+}
+
+// ----------------------------------------------------------------------
+// SimpleDtoa()
+// SimpleFtoa()
+// DoubleToBuffer()
+// FloatToBuffer()
+//    We want to print the value without losing precision, but we also do
+//    not want to print more digits than necessary.  This turns out to be
+//    trickier than it sounds.  Numbers like 0.2 cannot be represented
+//    exactly in binary.  If we print 0.2 with a very large precision,
+//    e.g. "%.50g", we get "0.2000000000000000111022302462515654042363167".
+//    On the other hand, if we set the precision too low, we lose
+//    significant digits when printing numbers that actually need them.
+//    It turns out there is no precision value that does the right thing
+//    for all numbers.
+//
+//    Our strategy is to first try printing with a precision that is never
+//    over-precise, then parse the result with strtod() to see if it
+//    matches.  If not, we print again with a precision that will always
+//    give a precise result, but may use more digits than necessary.
+//
+//    An arguably better strategy would be to use the algorithm described
+//    in "How to Print Floating-Point Numbers Accurately" by Steele &
+//    White, e.g. as implemented by David M. Gay's dtoa().  It turns out,
+//    however, that the following implementation is about as fast as
+//    DMG's code.  Furthermore, DMG's code locks mutexes, which means it
+//    will not scale well on multi-core machines.  DMG's code is slightly
+//    more accurate (in that it will never use more digits than
+//    necessary), but this is probably irrelevant for most users.
+//
+//    Rob Pike and Ken Thompson also have an implementation of dtoa() in
+//    third_party/fmt/fltfmt.cc.  Their implementation is similar to this
+//    one in that it makes guesses and then uses strtod() to check them.
+//    Their implementation is faster because they use their own code to
+//    generate the digits in the first place rather than use snprintf(),
+//    thus avoiding format string parsing overhead.  However, this makes
+//    it considerably more complicated than the following implementation,
+//    and it is embedded in a larger library.  If speed turns out to be
+//    an issue, we could re-implement this in terms of their
+//    implementation.
+// ----------------------------------------------------------------------
+
+string SimpleDtoa(double value) {
+  char buffer[kDoubleToBufferSize];
+  return DoubleToBuffer(value, buffer);
+}
+
+string SimpleFtoa(float value) {
+  char buffer[kFloatToBufferSize];
+  return FloatToBuffer(value, buffer);
+}
+
+static inline bool IsValidFloatChar(char c) {
+  return ('0' <= c && c <= '9') ||
+         c == 'e' || c == 'E' ||
+         c == '+' || c == '-';
+}
+
+void DelocalizeRadix(char* buffer) {
+  // Fast check:  if the buffer has a normal decimal point, assume no
+  // translation is needed.
+  if (strchr(buffer, '.') != NULL) return;
+
+  // Find the first unknown character.
+  while (IsValidFloatChar(*buffer)) ++buffer;
+
+  if (*buffer == '\0') {
+    // No radix character found.
+    return;
+  }
+
+  // We are now pointing at the locale-specific radix character.  Replace it
+  // with '.'.
+  *buffer = '.';
+  ++buffer;
+
+  if (!IsValidFloatChar(*buffer) && *buffer != '\0') {
+    // It appears the radix was a multi-byte character.  We need to remove the
+    // extra bytes.
+    char* target = buffer;
+    do { ++buffer; } while (!IsValidFloatChar(*buffer) && *buffer != '\0');
+    memmove(target, buffer, strlen(buffer) + 1);
+  }
+}
+
+char* DoubleToBuffer(double value, char* buffer) {
+  // DBL_DIG is 15 for IEEE-754 doubles, which are used on almost all
+  // platforms these days.  Just in case some system exists where DBL_DIG
+  // is significantly larger -- and risks overflowing our buffer -- we have
+  // this assert.
+  GOOGLE_COMPILE_ASSERT(DBL_DIG < 20, DBL_DIG_is_too_big);
+
+  if (value == numeric_limits<double>::infinity()) {
+    strcpy(buffer, "inf");
+    return buffer;
+  } else if (value == -numeric_limits<double>::infinity()) {
+    strcpy(buffer, "-inf");
+    return buffer;
+  } else if (MathLimits<double>::IsNaN(value)) {
+    strcpy(buffer, "nan");
+    return buffer;
+  }
+
+  int snprintf_result =
+    snprintf(buffer, kDoubleToBufferSize, "%.*g", DBL_DIG, value);
+
+  // The snprintf should never overflow because the buffer is significantly
+  // larger than the precision we asked for.
+  GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
+
+  // We need to make parsed_value volatile in order to force the compiler to
+  // write it out to the stack.  Otherwise, it may keep the value in a
+  // register, and if it does that, it may keep it as a long double instead
+  // of a double.  This long double may have extra bits that make it compare
+  // unequal to "value" even though it would be exactly equal if it were
+  // truncated to a double.
+  volatile double parsed_value = strtod(buffer, NULL);
+  if (parsed_value != value) {
+    int snprintf_result =
+      snprintf(buffer, kDoubleToBufferSize, "%.*g", DBL_DIG+2, value);
+
+    // Should never overflow; see above.
+    GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
+  }
+
+  DelocalizeRadix(buffer);
+  return buffer;
+}
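+
+// Illustrative examples of the strategy described above (the digit strings
+// are what "%.15g" and "%.17g" produce for these IEEE-754 doubles):
+//
+//   char buf[kDoubleToBufferSize];
+//   DoubleToBuffer(0.2, buf);        // "0.2" -- DBL_DIG digits already
+//                                    // round-trip through strtod().
+//   DoubleToBuffer(0.1 + 0.2, buf);  // "0.30000000000000004" -- the short
+//                                    // form parses back as 0.3, so the
+//                                    // DBL_DIG+2 form is used instead.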
+
+static int memcasecmp(const char *s1, const char *s2, size_t len) {
+  const unsigned char *us1 = reinterpret_cast<const unsigned char *>(s1);
+  const unsigned char *us2 = reinterpret_cast<const unsigned char *>(s2);
+
+  for (size_t i = 0; i < len; i++) {
+    const int diff =
+      static_cast<int>(static_cast<unsigned char>(ascii_tolower(us1[i]))) -
+      static_cast<int>(static_cast<unsigned char>(ascii_tolower(us2[i])));
+    if (diff != 0) return diff;
+  }
+  return 0;
+}
+
+inline bool CaseEqual(StringPiece s1, StringPiece s2) {
+  if (s1.size() != s2.size()) return false;
+  return memcasecmp(s1.data(), s2.data(), s1.size()) == 0;
+}
+
+bool safe_strtob(StringPiece str, bool* value) {
+  GOOGLE_CHECK(value != NULL) << "NULL output boolean given.";
+  if (CaseEqual(str, "true") || CaseEqual(str, "t") ||
+      CaseEqual(str, "yes") || CaseEqual(str, "y") ||
+      CaseEqual(str, "1")) {
+    *value = true;
+    return true;
+  }
+  if (CaseEqual(str, "false") || CaseEqual(str, "f") ||
+      CaseEqual(str, "no") || CaseEqual(str, "n") ||
+      CaseEqual(str, "0")) {
+    *value = false;
+    return true;
+  }
+  return false;
+}
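+
+// The accepted spellings are matched case-insensitively via CaseEqual.
+// Illustrative:
+//
+//   bool b;
+//   safe_strtob("Yes", &b);    // true,  b == true
+//   safe_strtob("0", &b);      // true,  b == false
+//   safe_strtob("maybe", &b);  // false, b left unchanged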
+
+bool safe_strtof(const char* str, float* value) {
+  char* endptr;
+  errno = 0;  // errno only gets set on errors
+#if defined(_WIN32) || defined (__hpux)  // has no strtof()
+  *value = strtod(str, &endptr);
+#else
+  *value = strtof(str, &endptr);
+#endif
+  return *str != 0 && *endptr == 0 && errno == 0;
+}
+
+bool safe_strtod(const char* str, double* value) {
+  char* endptr;
+  *value = strtod(str, &endptr);
+  if (endptr != str) {
+    while (ascii_isspace(*endptr)) ++endptr;
+  }
+  // Ignore range errors from strtod.  The values it
+  // returns on underflow and overflow are the right
+  // fallback in a robust setting.
+  return *str != '\0' && *endptr == '\0';
+}
+
+bool safe_strto32(const string& str, int32* value) {
+  return safe_int_internal(str, value);
+}
+
+bool safe_strtou32(const string& str, uint32* value) {
+  return safe_uint_internal(str, value);
+}
+
+bool safe_strto64(const string& str, int64* value) {
+  return safe_int_internal(str, value);
+}
+
+bool safe_strtou64(const string& str, uint64* value) {
+  return safe_uint_internal(str, value);
+}
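+
+// Illustrative behavior of the safe_strto* wrappers above (leading/trailing
+// spaces and an optional sign are consumed by safe_parse_sign):
+//
+//   int32 v32;
+//   safe_strto32(" -123 ", &v32);  // true,  v32 == -123
+//   safe_strto32("12x", &v32);     // false (trailing garbage)
+//   uint32 u32;
+//   safe_strtou32("-1", &u32);     // false (sign not allowed when unsigned)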
+
+char* FloatToBuffer(float value, char* buffer) {
+  // FLT_DIG is 6 for IEEE-754 floats, which are used on almost all
+  // platforms these days.  Just in case some system exists where FLT_DIG
+  // is significantly larger -- and risks overflowing our buffer -- we have
+  // this assert.
+  GOOGLE_COMPILE_ASSERT(FLT_DIG < 10, FLT_DIG_is_too_big);
+
+  if (value == numeric_limits<double>::infinity()) {
+    strcpy(buffer, "inf");
+    return buffer;
+  } else if (value == -numeric_limits<double>::infinity()) {
+    strcpy(buffer, "-inf");
+    return buffer;
+  } else if (MathLimits<float>::IsNaN(value)) {
+    strcpy(buffer, "nan");
+    return buffer;
+  }
+
+  int snprintf_result =
+    snprintf(buffer, kFloatToBufferSize, "%.*g", FLT_DIG, value);
+
+  // The snprintf should never overflow because the buffer is significantly
+  // larger than the precision we asked for.
+  GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
+
+  float parsed_value;
+  if (!safe_strtof(buffer, &parsed_value) || parsed_value != value) {
+    int snprintf_result =
+      snprintf(buffer, kFloatToBufferSize, "%.*g", FLT_DIG+2, value);
+
+    // Should never overflow; see above.
+    GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
+  }
+
+  DelocalizeRadix(buffer);
+  return buffer;
+}
+
+namespace strings {
+
+AlphaNum::AlphaNum(strings::Hex hex) {
+  char *const end = &digits[kFastToBufferSize];
+  char *writer = end;
+  uint64 value = hex.value;
+  uint64 width = hex.spec;
+  // We accomplish the requested minimum width by OR'ing the user's value
+  // with (1 << (width - 1) * 4), the smallest value that is 'width' hex
+  // digits wide; the loop below runs until 'mask' is exhausted, so it always
+  // emits at least 'width' digits.  E.g. for value == 0xBEEF and width == 8,
+  // mask starts as 0x1000BEEF and the output is "0000beef".
+  uint64 mask = ((static_cast<uint64>(1) << (width - 1) * 4)) | value;
+  static const char hexdigits[] = "0123456789abcdef";
+  do {
+    *--writer = hexdigits[value & 0xF];
+    value >>= 4;
+    mask >>= 4;
+  } while (mask != 0);
+  piece_data_ = writer;
+  piece_size_ = end - writer;
+}
+
+}  // namespace strings
+
+// ----------------------------------------------------------------------
+// StrCat()
+//    This merges the given strings or integers, with no delimiter.  This
+//    is designed to be the fastest possible way to construct a string out
+//    of a mix of raw C strings, C++ strings, and integer values.
+// ----------------------------------------------------------------------
+
+// Append is merely a version of memcpy that returns the address of the byte
+// after the area just overwritten.  It comes in multiple flavors to minimize
+// call overhead.
+static char *Append1(char *out, const AlphaNum &x) {
+  memcpy(out, x.data(), x.size());
+  return out + x.size();
+}
+
+static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {
+  memcpy(out, x1.data(), x1.size());
+  out += x1.size();
+
+  memcpy(out, x2.data(), x2.size());
+  return out + x2.size();
+}
+
+static char *Append4(char *out,
+                     const AlphaNum &x1, const AlphaNum &x2,
+                     const AlphaNum &x3, const AlphaNum &x4) {
+  memcpy(out, x1.data(), x1.size());
+  out += x1.size();
+
+  memcpy(out, x2.data(), x2.size());
+  out += x2.size();
+
+  memcpy(out, x3.data(), x3.size());
+  out += x3.size();
+
+  memcpy(out, x4.data(), x4.size());
+  return out + x4.size();
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b) {
+  string result;
+  result.resize(a.size() + b.size());
+  char *const begin = &*result.begin();
+  char *out = Append2(begin, a, b);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {
+  string result;
+  result.resize(a.size() + b.size() + c.size());
+  char *const begin = &*result.begin();
+  char *out = Append2(begin, a, b);
+  out = Append1(out, c);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  out = Append1(out, e);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e, const AlphaNum &f) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size() +
+                f.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  out = Append2(out, e, f);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e, const AlphaNum &f,
+              const AlphaNum &g) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size() +
+                f.size() + g.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  out = Append2(out, e, f);
+  out = Append1(out, g);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e, const AlphaNum &f,
+              const AlphaNum &g, const AlphaNum &h) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size() +
+                f.size() + g.size() + h.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  out = Append4(out, e, f, g, h);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e, const AlphaNum &f,
+              const AlphaNum &g, const AlphaNum &h, const AlphaNum &i) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size() +
+                f.size() + g.size() + h.size() + i.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  out = Append4(out, e, f, g, h);
+  out = Append1(out, i);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
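+
+// Illustrative use, assuming AlphaNum's converting constructors declared in
+// strutil.h: each argument (C string, C++ string, or integer) is formatted
+// once, then the result is built with a single resize and a few memcpy calls.
+//
+//   string name = StrCat("shard-", 42, ".tmp");   // name == "shard-42.tmp"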
+
+// It's possible to call StrAppend with a char* pointer that is partway into
+// the string we're appending to.  However, the results of this are undefined,
+// because resizing the destination can invalidate the source pointer.
+// Therefore, check for this in debug mode.  Use unsigned math so we only have
+// to do one comparison.
+#define GOOGLE_DCHECK_NO_OVERLAP(dest, src) \
+    GOOGLE_DCHECK_GT(uintptr_t((src).data() - (dest).data()), \
+                     uintptr_t((dest).size()))
+
+void StrAppend(string *result, const AlphaNum &a) {
+  GOOGLE_DCHECK_NO_OVERLAP(*result, a);
+  result->append(a.data(), a.size());
+}
+
+void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) {
+  GOOGLE_DCHECK_NO_OVERLAP(*result, a);
+  GOOGLE_DCHECK_NO_OVERLAP(*result, b);
+  string::size_type old_size = result->size();
+  result->resize(old_size + a.size() + b.size());
+  char *const begin = &*result->begin();
+  char *out = Append2(begin + old_size, a, b);
+  GOOGLE_DCHECK_EQ(out, begin + result->size());
+}
+
+void StrAppend(string *result,
+               const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {
+  GOOGLE_DCHECK_NO_OVERLAP(*result, a);
+  GOOGLE_DCHECK_NO_OVERLAP(*result, b);
+  GOOGLE_DCHECK_NO_OVERLAP(*result, c);
+  string::size_type old_size = result->size();
+  result->resize(old_size + a.size() + b.size() + c.size());
+  char *const begin = &*result->begin();
+  char *out = Append2(begin + old_size, a, b);
+  out = Append1(out, c);
+  GOOGLE_DCHECK_EQ(out, begin + result->size());
+}
+
+void StrAppend(string *result,
+               const AlphaNum &a, const AlphaNum &b,
+               const AlphaNum &c, const AlphaNum &d) {
+  GOOGLE_DCHECK_NO_OVERLAP(*result, a);
+  GOOGLE_DCHECK_NO_OVERLAP(*result, b);
+  GOOGLE_DCHECK_NO_OVERLAP(*result, c);
+  GOOGLE_DCHECK_NO_OVERLAP(*result, d);
+  string::size_type old_size = result->size();
+  result->resize(old_size + a.size() + b.size() + c.size() + d.size());
+  char *const begin = &*result->begin();
+  char *out = Append4(begin + old_size, a, b, c, d);
+  GOOGLE_DCHECK_EQ(out, begin + result->size());
+}
+
+int GlobalReplaceSubstring(const string& substring,
+                           const string& replacement,
+                           string* s) {
+  GOOGLE_CHECK(s != NULL);
+  if (s->empty() || substring.empty())
+    return 0;
+  string tmp;
+  int num_replacements = 0;
+  int pos = 0;
+  for (int match_pos = s->find(substring.data(), pos, substring.length());
+       match_pos != string::npos;
+       pos = match_pos + substring.length(),
+           match_pos = s->find(substring.data(), pos, substring.length())) {
+    ++num_replacements;
+    // Append the original content before the match.
+    tmp.append(*s, pos, match_pos - pos);
+    // Append the replacement for the match.
+    tmp.append(replacement.begin(), replacement.end());
+  }
+  // Append the content after the last match. If no replacements were made, the
+  // original string is left untouched.
+  if (num_replacements > 0) {
+    tmp.append(*s, pos, s->length() - pos);
+    s->swap(tmp);
+  }
+  return num_replacements;
+}
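+
+// Matches are found left to right and do not overlap, and replacements are
+// not rescanned.  Illustrative:
+//
+//   string s = "aaa";
+//   GlobalReplaceSubstring("aa", "b", &s);   // returns 1, s == "ba"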
+
+int CalculateBase64EscapedLen(int input_len, bool do_padding) {
+  // Base64 encodes three bytes of input at a time. If the input is not
+  // divisible by three, we pad as appropriate.
+  //
+  // (from http://tools.ietf.org/html/rfc3548)
+  // Special processing is performed if fewer than 24 bits are available
+  // at the end of the data being encoded.  A full encoding quantum is
+  // always completed at the end of a quantity.  When fewer than 24 input
+  // bits are available in an input group, zero bits are added (on the
+  // right) to form an integral number of 6-bit groups.  Padding at the
+  // end of the data is performed using the '=' character.  Since all base
+  // 64 input is an integral number of octets, only the following cases
+  // can arise:
+
+
+  // Base64 encodes each three bytes of input into four bytes of output.
+  int len = (input_len / 3) * 4;
+
+  if (input_len % 3 == 0) {
+    // (from http://tools.ietf.org/html/rfc3548)
+    // (1) the final quantum of encoding input is an integral multiple of 24
+    // bits; here, the final unit of encoded output will be an integral
+    // multiple of 4 characters with no "=" padding,
+  } else if (input_len % 3 == 1) {
+    // (from http://tools.ietf.org/html/rfc3548)
+    // (2) the final quantum of encoding input is exactly 8 bits; here, the
+    // final unit of encoded output will be two characters followed by two
+    // "=" padding characters, or
+    len += 2;
+    if (do_padding) {
+      len += 2;
+    }
+  } else {  // (input_len % 3 == 2)
+    // (from http://tools.ietf.org/html/rfc3548)
+    // (3) the final quantum of encoding input is exactly 16 bits; here, the
+    // final unit of encoded output will be three characters followed by one
+    // "=" padding character.
+    len += 3;
+    if (do_padding) {
+      len += 1;
+    }
+  }
+
+  assert(len >= input_len);  // make sure we didn't overflow
+  return len;
+}
+
+// Base64Escape does padding, so this calculation includes padding.
+int CalculateBase64EscapedLen(int input_len) {
+  return CalculateBase64EscapedLen(input_len, true);
+}
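+
+// Worked example of the arithmetic above (illustrative): 10 input bytes form
+// three full 3-byte groups (12 output characters) plus one leftover byte, so
+//
+//   CalculateBase64EscapedLen(10, false);  // 12 + 2     == 14
+//   CalculateBase64EscapedLen(10, true);   // 12 + 2 + 2 == 16
+//   CalculateBase64EscapedLen(10);         // same as above: 16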
+
+// ----------------------------------------------------------------------
+// int Base64Unescape() - base64 decoder
+// int Base64Escape() - base64 encoder
+// int WebSafeBase64Unescape() - Google's variation of base64 decoder
+// int WebSafeBase64Escape() - Google's variation of base64 encoder
+//
+// Check out
+// http://tools.ietf.org/html/rfc2045 for formal description, but what we
+// care about is that...
+//   Take the encoded stuff in groups of 4 characters and turn each
+//   character into a code 0 to 63 thus:
+//           A-Z map to 0 to 25
+//           a-z map to 26 to 51
+//           0-9 map to 52 to 61
+//           +(- for WebSafe) maps to 62
+//           /(_ for WebSafe) maps to 63
+//   There will be four numbers, all less than 64 which can be represented
+//   by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+//   Arrange the 6 digit binary numbers into three bytes as such:
+//   aaaaaabb bbbbcccc ccdddddd
+//   Equals signs (one or two) are used at the end of the encoded block to
+//   indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
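+
+// For example (illustrative), the three bytes "Man" are
+//   01001101 01100001 01101110
+// which regroup into the four 6-bit values 19, 22, 5 and 46, i.e. the encoded
+// quartet "TWFu"; the decoder below reverses exactly this regrouping.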
+
+int Base64UnescapeInternal(const char *src_param, int szsrc,
+                           char *dest, int szdest,
+                           const signed char* unbase64) {
+  static const char kPad64Equals = '=';
+  static const char kPad64Dot = '.';
+
+  int decode = 0;
+  int destidx = 0;
+  int state = 0;
+  unsigned int ch = 0;
+  unsigned int temp = 0;
+
+  // If "char" is signed by default, using *src as an array index results in
+  // accessing negative array elements. Treat the input as a pointer to
+  // unsigned char to avoid this.
+  const unsigned char *src = reinterpret_cast<const unsigned char*>(src_param);
+
+  // The GET_INPUT macro gets the next input character, skipping
+  // over any whitespace, and stopping when we reach the end of the
+  // string or when we read any non-data character.  The arguments are
+  // an arbitrary identifier (used as a label for goto) and the number
+  // of data bytes that must remain in the input to avoid aborting the
+  // loop.
+#define GET_INPUT(label, remain)                 \
+  label:                                         \
+    --szsrc;                                     \
+    ch = *src++;                                 \
+    decode = unbase64[ch];                       \
+    if (decode < 0) {                            \
+      if (ascii_isspace(ch) && szsrc >= remain)  \
+        goto label;                              \
+      state = 4 - remain;                        \
+      break;                                     \
+    }
+
+  // if dest is null, we're just checking to see if it's legal input
+  // rather than producing output.  (I suspect this could just be done
+  // with a regexp...).  We duplicate the loop so this test can be
+  // outside it instead of in every iteration.
+
+  if (dest) {
+    // This loop consumes 4 input bytes and produces 3 output bytes
+    // per iteration.  We can't know at the start that there is enough
+    // data left in the string for a full iteration, so the loop may
+    // break out in the middle; if so 'state' will be set to the
+    // number of input bytes read.
+
+    while (szsrc >= 4)  {
+      // We'll start by optimistically assuming that the next four
+      // bytes of the string (src[0..3]) are four good data bytes
+      // (that is, no nulls, whitespace, padding chars, or illegal
+      // chars).  We need to test src[0..2] for nulls individually
+      // before constructing temp to preserve the property that we
+      // never read past a null in the string (no matter how long
+      // szsrc claims the string is).
+
+      if (!src[0] || !src[1] || !src[2] ||
+          (temp = ((unsigned(unbase64[src[0]]) << 18) |
+                   (unsigned(unbase64[src[1]]) << 12) |
+                   (unsigned(unbase64[src[2]]) << 6) |
+                   (unsigned(unbase64[src[3]])))) & 0x80000000) {
+        // Iff any of those four characters was bad (null, illegal,
+        // whitespace, padding), then temp's high bit will be set
+        // (because unbase64[] is -1 for all bad characters).
+        //
+        // We'll back up and resort to the slower decoder, which knows
+        // how to handle those cases.
+
+        GET_INPUT(first, 4);
+        temp = decode;
+        GET_INPUT(second, 3);
+        temp = (temp << 6) | decode;
+        GET_INPUT(third, 2);
+        temp = (temp << 6) | decode;
+        GET_INPUT(fourth, 1);
+        temp = (temp << 6) | decode;
+      } else {
+        // We really did have four good data bytes, so advance four
+        // characters in the string.
+
+        szsrc -= 4;
+        src += 4;
+        decode = -1;
+        ch = '\0';
+      }
+
+      // temp has 24 bits of input, so write that out as three bytes.
+
+      if (destidx+3 > szdest) return -1;
+      dest[destidx+2] = temp;
+      temp >>= 8;
+      dest[destidx+1] = temp;
+      temp >>= 8;
+      dest[destidx] = temp;
+      destidx += 3;
+    }
+  } else {
+    while (szsrc >= 4)  {
+      if (!src[0] || !src[1] || !src[2] ||
+          (temp = ((unsigned(unbase64[src[0]]) << 18) |
+                   (unsigned(unbase64[src[1]]) << 12) |
+                   (unsigned(unbase64[src[2]]) << 6) |
+                   (unsigned(unbase64[src[3]])))) & 0x80000000) {
+        GET_INPUT(first_no_dest, 4);
+        GET_INPUT(second_no_dest, 3);
+        GET_INPUT(third_no_dest, 2);
+        GET_INPUT(fourth_no_dest, 1);
+      } else {
+        szsrc -= 4;
+        src += 4;
+        decode = -1;
+        ch = '\0';
+      }
+      destidx += 3;
+    }
+  }
+
+#undef GET_INPUT
+
+  // if the loop terminated because we read a bad character, return
+  // now.
+  if (decode < 0 && ch != '\0' &&
+      ch != kPad64Equals && ch != kPad64Dot && !ascii_isspace(ch))
+    return -1;
+
+  if (ch == kPad64Equals || ch == kPad64Dot) {
+    // if we stopped by hitting an '=' or '.', un-read that character -- we'll
+    // look at it again when we count to check for the proper number of
+    // equals signs at the end.
+    ++szsrc;
+    --src;
+  } else {
+    // This loop consumes 1 input byte per iteration.  It's used to
+    // clean up the 0-3 input bytes remaining when the first, faster
+    // loop finishes.  'temp' contains the data from 'state' input
+    // characters read by the first loop.
+    while (szsrc > 0)  {
+      --szsrc;
+      ch = *src++;
+      decode = unbase64[ch];
+      if (decode < 0) {
+        if (ascii_isspace(ch)) {
+          continue;
+        } else if (ch == '\0') {
+          break;
+        } else if (ch == kPad64Equals || ch == kPad64Dot) {
+          // back up one character; we'll read it again when we check
+          // for the correct number of pad characters at the end.
+          ++szsrc;
+          --src;
+          break;
+        } else {
+          return -1;
+        }
+      }
+
+      // Each input character gives us six bits of output.
+      temp = (temp << 6) | decode;
+      ++state;
+      if (state == 4) {
+        // If we've accumulated 24 bits of output, write that out as
+        // three bytes.
+        if (dest) {
+          if (destidx+3 > szdest) return -1;
+          dest[destidx+2] = temp;
+          temp >>= 8;
+          dest[destidx+1] = temp;
+          temp >>= 8;
+          dest[destidx] = temp;
+        }
+        destidx += 3;
+        state = 0;
+        temp = 0;
+      }
+    }
+  }
+
+  // Process the leftover data contained in 'temp' at the end of the input.
+  int expected_equals = 0;
+  switch (state) {
+    case 0:
+      // Nothing left over; output is a multiple of 3 bytes.
+      break;
+
+    case 1:
+      // Bad input; we have 6 bits left over.
+      return -1;
+
+    case 2:
+      // Produce one more output byte from the 12 input bits we have left.
+      if (dest) {
+        if (destidx+1 > szdest) return -1;
+        temp >>= 4;
+        dest[destidx] = temp;
+      }
+      ++destidx;
+      expected_equals = 2;
+      break;
+
+    case 3:
+      // Produce two more output bytes from the 18 input bits we have left.
+      if (dest) {
+        if (destidx+2 > szdest) return -1;
+        temp >>= 2;
+        dest[destidx+1] = temp;
+        temp >>= 8;
+        dest[destidx] = temp;
+      }
+      destidx += 2;
+      expected_equals = 1;
+      break;
+
+    default:
+      // state should have no other values at this point.
+      GOOGLE_LOG(FATAL) << "This can't happen; base64 decoder state = " << state;
+  }
+
+  // The remainder of the string should be all whitespace, mixed with
+  // exactly 0 equals signs, or exactly 'expected_equals' equals
+  // signs.  (Always accepting 0 equals signs is a google extension
+  // not covered in the RFC, as is accepting dot as the pad character.)
+
+  int equals = 0;
+  while (szsrc > 0 && *src) {
+    if (*src == kPad64Equals || *src == kPad64Dot)
+      ++equals;
+    else if (!ascii_isspace(*src))
+      return -1;
+    --szsrc;
+    ++src;
+  }
+
+  return (equals == 0 || equals == expected_equals) ? destidx : -1;
+}
+
+// The arrays below were generated by the following code
+// #include <sys/time.h>
+// #include <stdlib.h>
+// #include <string.h>
+// main()
+// {
+//   static const char Base64[] =
+//     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+//   char *pos;
+//   int idx, i, j;
+//   printf("    ");
+//   for (i = 0; i < 255; i += 8) {
+//     for (j = i; j < i + 8; j++) {
+//       pos = strchr(Base64, j);
+//       if ((pos == NULL) || (j == 0))
+//         idx = -1;
+//       else
+//         idx = pos - Base64;
+//       if (idx == -1)
+//         printf(" %2d,     ", idx);
+//       else
+//         printf(" %2d/*%c*/,", idx, j);
+//     }
+//     printf("\n    ");
+//   }
+// }
+//
+// where the value of "Base64[]" was replaced by one of the base-64 conversion
+// tables from the functions below.
+static const signed char kUnBase64[] = {
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      62/*+*/, -1,      -1,      -1,      63/*/ */,
+  52/*0*/, 53/*1*/, 54/*2*/, 55/*3*/, 56/*4*/, 57/*5*/, 58/*6*/, 59/*7*/,
+  60/*8*/, 61/*9*/, -1,      -1,      -1,      -1,      -1,      -1,
+  -1,       0/*A*/,  1/*B*/,  2/*C*/,  3/*D*/,  4/*E*/,  5/*F*/,  6/*G*/,
+  07/*H*/,  8/*I*/,  9/*J*/, 10/*K*/, 11/*L*/, 12/*M*/, 13/*N*/, 14/*O*/,
+  15/*P*/, 16/*Q*/, 17/*R*/, 18/*S*/, 19/*T*/, 20/*U*/, 21/*V*/, 22/*W*/,
+  23/*X*/, 24/*Y*/, 25/*Z*/, -1,      -1,      -1,      -1,      -1,
+  -1,      26/*a*/, 27/*b*/, 28/*c*/, 29/*d*/, 30/*e*/, 31/*f*/, 32/*g*/,
+  33/*h*/, 34/*i*/, 35/*j*/, 36/*k*/, 37/*l*/, 38/*m*/, 39/*n*/, 40/*o*/,
+  41/*p*/, 42/*q*/, 43/*r*/, 44/*s*/, 45/*t*/, 46/*u*/, 47/*v*/, 48/*w*/,
+  49/*x*/, 50/*y*/, 51/*z*/, -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1
+};
+static const signed char kUnWebSafeBase64[] = {
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      62/*-*/, -1,      -1,
+  52/*0*/, 53/*1*/, 54/*2*/, 55/*3*/, 56/*4*/, 57/*5*/, 58/*6*/, 59/*7*/,
+  60/*8*/, 61/*9*/, -1,      -1,      -1,      -1,      -1,      -1,
+  -1,       0/*A*/,  1/*B*/,  2/*C*/,  3/*D*/,  4/*E*/,  5/*F*/,  6/*G*/,
+  07/*H*/,  8/*I*/,  9/*J*/, 10/*K*/, 11/*L*/, 12/*M*/, 13/*N*/, 14/*O*/,
+  15/*P*/, 16/*Q*/, 17/*R*/, 18/*S*/, 19/*T*/, 20/*U*/, 21/*V*/, 22/*W*/,
+  23/*X*/, 24/*Y*/, 25/*Z*/, -1,      -1,      -1,      -1,      63/*_*/,
+  -1,      26/*a*/, 27/*b*/, 28/*c*/, 29/*d*/, 30/*e*/, 31/*f*/, 32/*g*/,
+  33/*h*/, 34/*i*/, 35/*j*/, 36/*k*/, 37/*l*/, 38/*m*/, 39/*n*/, 40/*o*/,
+  41/*p*/, 42/*q*/, 43/*r*/, 44/*s*/, 45/*t*/, 46/*u*/, 47/*v*/, 48/*w*/,
+  49/*x*/, 50/*y*/, 51/*z*/, -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1,
+  -1,      -1,      -1,      -1,      -1,      -1,      -1,      -1
+};
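+
+// These decode tables map a character's byte value to its 6-bit Base64
+// value, or to -1 for any byte outside the alphabet (including the '='
+// pad).  For example, in the web-safe table above:
+//   kUnWebSafeBase64['-'] == 62, kUnWebSafeBase64['_'] == 63,
+//   kUnWebSafeBase64['A'] == 0,  kUnWebSafeBase64['='] == -1.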
+
+int WebSafeBase64Unescape(const char *src, int szsrc, char *dest, int szdest) {
+  return Base64UnescapeInternal(src, szsrc, dest, szdest, kUnWebSafeBase64);
+}
+
+static bool Base64UnescapeInternal(const char* src, int slen, string* dest,
+                                   const signed char* unbase64) {
+  // Determine the size of the output string.  Base64 encodes every 3 bytes into
+  // 4 characters.  Any leftover chars are added directly for good measure.
+  // This is documented in the base64 RFC: http://tools.ietf.org/html/rfc3548
+  const int dest_len = 3 * (slen / 4) + (slen % 4);
+
+  dest->resize(dest_len);
+
+  // We obtain the destination buffer by taking the beginning of the
+  // string and converting it into a char *.
+  const int len = Base64UnescapeInternal(src, slen, string_as_array(dest),
+                                         dest_len, unbase64);
+  if (len < 0) {
+    dest->clear();
+    return false;
+  }
+
+  // could be shorter if there was padding
+  GOOGLE_DCHECK_LE(len, dest_len);
+  dest->erase(len);
+
+  return true;
+}
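+
+// Worked example of the sizing above: decoding "YWJjZGU=" has slen == 8, so
+// dest_len == 3 * (8 / 4) + (8 % 4) == 6, but the decoded payload "abcde" is
+// only 5 bytes because of the '=' pad, so the erase() call trims the spare
+// byte.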
+
+bool Base64Unescape(StringPiece src, string* dest) {
+  return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
+}
+
+bool WebSafeBase64Unescape(StringPiece src, string* dest) {
+  return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64);
+}
+
+int Base64EscapeInternal(const unsigned char *src, int szsrc,
+                         char *dest, int szdest, const char *base64,
+                         bool do_padding) {
+  static const char kPad64 = '=';
+
+  if (szsrc <= 0) return 0;
+
+  if (szsrc * 4 > szdest * 3) return 0;
+
+  char *cur_dest = dest;
+  const unsigned char *cur_src = src;
+
+  char *limit_dest = dest + szdest;
+  const unsigned char *limit_src = src + szsrc;
+
+  // Three bytes of data encode to four characters of cyphertext.
+  // So we can pump through three-byte chunks atomically.
+  while (cur_src < limit_src - 3) {  // keep going as long as we have >= 32 bits
+    uint32 in = BigEndian::Load32(cur_src) >> 8;
+
+    cur_dest[0] = base64[in >> 18];
+    in &= 0x3FFFF;
+    cur_dest[1] = base64[in >> 12];
+    in &= 0xFFF;
+    cur_dest[2] = base64[in >> 6];
+    in &= 0x3F;
+    cur_dest[3] = base64[in];
+
+    cur_dest += 4;
+    cur_src += 3;
+  }
+  // To save time, we didn't update szdest or szsrc in the loop.  So do it now.
+  szdest = limit_dest - cur_dest;
+  szsrc = limit_src - cur_src;
+
+  /* now deal with the tail (<=3 bytes) */
+  switch (szsrc) {
+    case 0:
+      // Nothing left; nothing more to do.
+      break;
+    case 1: {
+      // One byte left: this encodes to two characters, and (optionally)
+      // two pad characters to round out the four-character cypherblock.
+      if ((szdest -= 2) < 0) return 0;
+      uint32 in = cur_src[0];
+      cur_dest[0] = base64[in >> 2];
+      in &= 0x3;
+      cur_dest[1] = base64[in << 4];
+      cur_dest += 2;
+      if (do_padding) {
+        if ((szdest -= 2) < 0) return 0;
+        cur_dest[0] = kPad64;
+        cur_dest[1] = kPad64;
+        cur_dest += 2;
+      }
+      break;
+    }
+    case 2: {
+      // Two bytes left: this encodes to three characters, and (optionally)
+      // one pad character to round out the four-character cypherblock.
+      if ((szdest -= 3) < 0) return 0;
+      uint32 in = BigEndian::Load16(cur_src);
+      cur_dest[0] = base64[in >> 10];
+      in &= 0x3FF;
+      cur_dest[1] = base64[in >> 4];
+      in &= 0x00F;
+      cur_dest[2] = base64[in << 2];
+      cur_dest += 3;
+      if (do_padding) {
+        if ((szdest -= 1) < 0) return 0;
+        cur_dest[0] = kPad64;
+        cur_dest += 1;
+      }
+      break;
+    }
+    case 3: {
+      // Three bytes left: same as in the big loop above.  We can't do this in
+      // the loop because the loop above always reads 4 bytes, and the fourth
+      // byte is past the end of the input.
+      if ((szdest -= 4) < 0) return 0;
+      uint32 in = (cur_src[0] << 16) + BigEndian::Load16(cur_src + 1);
+      cur_dest[0] = base64[in >> 18];
+      in &= 0x3FFFF;
+      cur_dest[1] = base64[in >> 12];
+      in &= 0xFFF;
+      cur_dest[2] = base64[in >> 6];
+      in &= 0x3F;
+      cur_dest[3] = base64[in];
+      cur_dest += 4;
+      break;
+    }
+    default:
+      // Should not be reached: the while loop above consumes input until
+      // fewer than 4 bytes remain, so szsrc is at most 3 here.
+      GOOGLE_LOG(FATAL) << "Logic problem? szsrc = " << szsrc;
+      break;
+  }
+  return (cur_dest - dest);
+}
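+
+// Worked example of the packing above: the input bytes 'M', 'a', 'n'
+// (0x4D 0x61 0x6E) form the 24-bit group 010011|010110|000101|101110,
+// i.e. indices 19, 22, 5, 46, which select "TWFu" from the standard
+// alphabet.  A single trailing byte such as 'M' encodes to "TQ", plus
+// "==" when do_padding is true.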
+
+static const char kBase64Chars[] =
+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+static const char kWebSafeBase64Chars[] =
+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+int Base64Escape(const unsigned char *src, int szsrc, char *dest, int szdest) {
+  return Base64EscapeInternal(src, szsrc, dest, szdest, kBase64Chars, true);
+}
+int WebSafeBase64Escape(const unsigned char *src, int szsrc, char *dest,
+                        int szdest, bool do_padding) {
+  return Base64EscapeInternal(src, szsrc, dest, szdest,
+                              kWebSafeBase64Chars, do_padding);
+}
+
+void Base64EscapeInternal(const unsigned char* src, int szsrc,
+                          string* dest, bool do_padding,
+                          const char* base64_chars) {
+  const int calc_escaped_size =
+    CalculateBase64EscapedLen(szsrc, do_padding);
+  dest->resize(calc_escaped_size);
+  const int escaped_len = Base64EscapeInternal(src, szsrc,
+                                               string_as_array(dest),
+                                               dest->size(),
+                                               base64_chars,
+                                               do_padding);
+  GOOGLE_DCHECK_EQ(calc_escaped_size, escaped_len);
+  dest->erase(escaped_len);
+}
+
+void Base64Escape(const unsigned char *src, int szsrc,
+                  string* dest, bool do_padding) {
+  Base64EscapeInternal(src, szsrc, dest, do_padding, kBase64Chars);
+}
+
+void WebSafeBase64Escape(const unsigned char *src, int szsrc,
+                         string *dest, bool do_padding) {
+  Base64EscapeInternal(src, szsrc, dest, do_padding, kWebSafeBase64Chars);
+}
+
+void Base64Escape(StringPiece src, string* dest) {
+  Base64Escape(reinterpret_cast<const unsigned char*>(src.data()),
+               src.size(), dest, true);
+}
+
+void WebSafeBase64Escape(StringPiece src, string* dest) {
+  WebSafeBase64Escape(reinterpret_cast<const unsigned char*>(src.data()),
+                      src.size(), dest, false);
+}
+
+void WebSafeBase64EscapeWithPadding(StringPiece src, string* dest) {
+  WebSafeBase64Escape(reinterpret_cast<const unsigned char*>(src.data()),
+                      src.size(), dest, true);
+}
+
+// Helper to append a Unicode code point to a string as UTF8, without bringing
+// in any external dependencies.
+int EncodeAsUTF8Char(uint32 code_point, char* output) {
+  uint32 tmp = 0;
+  int len = 0;
+  if (code_point <= 0x7f) {
+    tmp = code_point;
+    len = 1;
+  } else if (code_point <= 0x07ff) {
+    tmp = 0x0000c080 |
+        ((code_point & 0x07c0) << 2) |
+        (code_point & 0x003f);
+    len = 2;
+  } else if (code_point <= 0xffff) {
+    tmp = 0x00e08080 |
+        ((code_point & 0xf000) << 4) |
+        ((code_point & 0x0fc0) << 2) |
+        (code_point & 0x003f);
+    len = 3;
+  } else {
+    // UTF-16 is only defined for code points up to 0x10FFFF, and UTF-8 is
+    // normally only defined up to there as well.
+    tmp = 0xf0808080 |
+        ((code_point & 0x1c0000) << 6) |
+        ((code_point & 0x03f000) << 4) |
+        ((code_point & 0x000fc0) << 2) |
+        (code_point & 0x003f);
+    len = 4;
+  }
+  tmp = ghtonl(tmp);
+  memcpy(output, reinterpret_cast<const char*>(&tmp) + sizeof(tmp) - len, len);
+  return len;
+}
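+
+// Worked example for the three-byte branch above: U+2019 (RIGHT SINGLE
+// QUOTATION MARK) is 0010 0000 0001 1001 in binary and is packed as
+// 1110 0010 / 10 000000 / 10 011001, i.e. the bytes 0xE2 0x80 0x99.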
+
+// Table of UTF-8 character lengths, based on first byte
+static const unsigned char kUTF8LenTbl[256] = {
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,
+  3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3, 4,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4
+};
+
+// Return length of a single UTF-8 source character
+int UTF8FirstLetterNumBytes(const char* src, int len) {
+  if (len == 0) {
+    return 0;
+  }
+  return kUTF8LenTbl[*reinterpret_cast<const uint8*>(src)];
+}
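+
+// For example, the lead byte 0xE2 (the first byte of the U+2019 sequence
+// above) falls in the three-byte row of the table, so
+// UTF8FirstLetterNumBytes() returns 3 for any non-empty string that starts
+// with it.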
+
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/strutil.h b/src/google/protobuf/stubs/strutil.h
new file mode 100644
index 0000000..27d4757
--- /dev/null
+++ b/src/google/protobuf/stubs/strutil.h
@@ -0,0 +1,868 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/strings/strutil.h
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
+#define GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
+
+#include <stdlib.h>
+#include <vector>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/stringpiece.h>
+
+namespace google {
+namespace protobuf {
+
+#ifdef _MSC_VER
+#define strtoll  _strtoi64
+#define strtoull _strtoui64
+#elif defined(__DECCXX) && defined(__osf__)
+// HP C++ on Tru64 does not have strtoll, but strtol is already 64-bit.
+#define strtoll strtol
+#define strtoull strtoul
+#endif
+
+// ----------------------------------------------------------------------
+// ascii_isalnum()
+//    Check if an ASCII character is alphanumeric.  We can't use ctype's
+//    isalnum() because it is affected by locale.  This function is applied
+//    to identifiers in the protocol buffer language, not to natural-language
+//    strings, so locale should not be taken into account.
+// ascii_isdigit()
+//    Like above, but only accepts digits.
+// ascii_isspace()
+//    Check if the character is a space character.
+// ----------------------------------------------------------------------
+
+inline bool ascii_isalnum(char c) {
+  return ('a' <= c && c <= 'z') ||
+         ('A' <= c && c <= 'Z') ||
+         ('0' <= c && c <= '9');
+}
+
+inline bool ascii_isdigit(char c) {
+  return ('0' <= c && c <= '9');
+}
+
+inline bool ascii_isspace(char c) {
+  return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' ||
+      c == '\r';
+}
+
+inline bool ascii_isupper(char c) {
+  return c >= 'A' && c <= 'Z';
+}
+
+inline bool ascii_islower(char c) {
+  return c >= 'a' && c <= 'z';
+}
+
+inline char ascii_toupper(char c) {
+  return ascii_islower(c) ? c - ('a' - 'A') : c;
+}
+
+inline char ascii_tolower(char c) {
+  return ascii_isupper(c) ? c + ('a' - 'A') : c;
+}
+
+inline int hex_digit_to_int(char c) {
+  /* Assume ASCII. */
+  int x = static_cast<unsigned char>(c);
+  if (x > '9') {
+    x += 9;
+  }
+  return x & 0xf;
+}
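+
+// hex_digit_to_int() relies on the ASCII layout: for 'a'..'f' and 'A'..'F'
+// the low nibble is 1..6, so adding 9 and masking with 0xf gives 10..15.
+// For example, 'b' is 0x62; 0x62 + 9 == 0x6b and 0x6b & 0xf == 11.  Digits
+// pass straight through: '7' is 0x37 and 0x37 & 0xf == 7.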
+
+// ----------------------------------------------------------------------
+// HasPrefixString()
+//    Check if a string begins with a given prefix.
+// StripPrefixString()
+//    Given a string and a putative prefix, returns the string minus the
+//    prefix string if the prefix matches, otherwise the original
+//    string.
+// ----------------------------------------------------------------------
+inline bool HasPrefixString(const string& str,
+                            const string& prefix) {
+  return str.size() >= prefix.size() &&
+         str.compare(0, prefix.size(), prefix) == 0;
+}
+
+inline string StripPrefixString(const string& str, const string& prefix) {
+  if (HasPrefixString(str, prefix)) {
+    return str.substr(prefix.size());
+  } else {
+    return str;
+  }
+}
+
+// ----------------------------------------------------------------------
+// HasSuffixString()
+//    Return true if str ends in suffix.
+// StripSuffixString()
+//    Given a string and a putative suffix, returns the string minus the
+//    suffix string if the suffix matches, otherwise the original
+//    string.
+// ----------------------------------------------------------------------
+inline bool HasSuffixString(const string& str,
+                            const string& suffix) {
+  return str.size() >= suffix.size() &&
+         str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
+}
+
+inline string StripSuffixString(const string& str, const string& suffix) {
+  if (HasSuffixString(str, suffix)) {
+    return str.substr(0, str.size() - suffix.size());
+  } else {
+    return str;
+  }
+}
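+
+// Example usage of the prefix/suffix helpers above:
+//   StripPrefixString("foo.proto", "foo")     // ".proto"
+//   StripSuffixString("foo.proto", ".proto")  // "foo"
+//   StripPrefixString("foo.proto", "bar")     // no match: "foo.proto"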
+
+// ----------------------------------------------------------------------
+// StripString
+//    Replaces any occurrence of the character 'remove' (or the characters
+//    in 'remove') with the character 'replacewith'.
+//    Good for keeping html characters or protocol characters (\t) out
+//    of places where they might cause a problem.
+// StripWhitespace
+//    Removes whitespaces from both ends of the given string.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void StripString(string* s, const char* remove,
+                                    char replacewith);
+
+LIBPROTOBUF_EXPORT void StripWhitespace(string* s);
+
+
+// ----------------------------------------------------------------------
+// LowerString()
+// UpperString()
+// ToUpper()
+//    Convert the characters in "s" to lowercase or uppercase.  ASCII-only:
+//    these functions intentionally ignore locale because they are applied to
+//    identifiers used in the Protocol Buffer language, not to natural-language
+//    strings.
+// ----------------------------------------------------------------------
+
+inline void LowerString(string * s) {
+  string::iterator end = s->end();
+  for (string::iterator i = s->begin(); i != end; ++i) {
+    // tolower() changes based on locale.  We don't want this!
+    if ('A' <= *i && *i <= 'Z') *i += 'a' - 'A';
+  }
+}
+
+inline void UpperString(string * s) {
+  string::iterator end = s->end();
+  for (string::iterator i = s->begin(); i != end; ++i) {
+    // toupper() changes based on locale.  We don't want this!
+    if ('a' <= *i && *i <= 'z') *i += 'A' - 'a';
+  }
+}
+
+inline string ToUpper(const string& s) {
+  string out = s;
+  UpperString(&out);
+  return out;
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+//    Give me a string and two patterns "old" and "new", and I replace
+//    the first instance of "old" in the string with "new" (or every
+//    instance, if replace_all is true).  RETURNS a new string, regardless
+//    of whether the replacement happened or not.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT string StringReplace(const string& s, const string& oldsub,
+                                        const string& newsub, bool replace_all);
+
+// ----------------------------------------------------------------------
+// SplitStringUsing()
+//    Split a string using a character delimiter. Append the components
+//    to 'result'.  If there are consecutive delimiters, this function skips
+//    over all of them.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void SplitStringUsing(const string& full, const char* delim,
+                                         vector<string>* res);
+
+// ----------------------------------------------------------------------
+// SplitStringAllowEmpty()
+//    Split a string using one or more byte delimiters, presented
+//    as a nul-terminated c string. Append the components to 'result'.
+//    If there are consecutive delimiters, this function will return
+//    corresponding empty strings.  If you want to drop the empty
+//    strings, try SplitStringUsing().
+//
+//    If "full" is the empty string, yields an empty string as the only
+//    value.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void SplitStringAllowEmpty(const string& full,
+                                              const char* delim,
+                                              vector<string>* result);
+
+// ----------------------------------------------------------------------
+// Split()
+//    Split a string using a character delimiter.
+// ----------------------------------------------------------------------
+inline vector<string> Split(
+    const string& full, const char* delim, bool skip_empty = true) {
+  vector<string> result;
+  if (skip_empty) {
+    SplitStringUsing(full, delim, &result);
+  } else {
+    SplitStringAllowEmpty(full, delim, &result);
+  }
+  return result;
+}
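+
+// Example of the skip_empty flag:
+//   Split("a,,b", ",")         // {"a", "b"}
+//   Split("a,,b", ",", false)  // {"a", "", "b"}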
+
+// ----------------------------------------------------------------------
+// JoinStrings()
+//    These methods concatenate a vector of strings into a C++ string, using
+//    the C-string "delim" as a separator between components. There are two
+//    flavors of the function, one flavor returns the concatenated string,
+//    another takes a pointer to the target string. In the latter case the
+//    target string is cleared and overwritten.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void JoinStrings(const vector<string>& components,
+                                    const char* delim, string* result);
+
+inline string JoinStrings(const vector<string>& components,
+                          const char* delim) {
+  string result;
+  JoinStrings(components, delim, &result);
+  return result;
+}
+
+// ----------------------------------------------------------------------
+// UnescapeCEscapeSequences()
+//    Copies "source" to "dest", rewriting C-style escape sequences
+//    -- '\n', '\r', '\\', '\ooo', etc -- to their ASCII
+//    equivalents.  "dest" must be sufficiently large to hold all
+//    the characters in the rewritten string (i.e. at least as large
+//    as strlen(source) + 1 should be safe, since the replacements
+//    are always shorter than the original escaped sequences).  It's
+//    safe for source and dest to be the same.  RETURNS the length
+//    of dest.
+//
+//    It allows hex sequences \xhh, or generally \xhhhhh with an
+//    arbitrary number of hex digits, but all of them together must
+//    specify a value of a single byte (e.g. \x0045 is equivalent
+//    to \x45, and \x1234 is erroneous).
+//
+//    It also allows escape sequences of the form \uhhhh (exactly four
+//    hex digits, upper or lower case) or \Uhhhhhhhh (exactly eight
+//    hex digits, upper or lower case) to specify a Unicode code
+//    point. The dest array will contain the UTF8-encoded version of
+//    that code-point (e.g., if source contains \u2019, then dest will
+//    contain the three bytes 0xE2, 0x80, and 0x99).
+//
+//    Errors: In the first form of the call, errors are reported with
+//    LOG(ERROR). The same is true for the second form of the call if
+//    the pointer to the string vector is NULL; otherwise, error
+//    messages are stored in the vector. In either case, the effect on
+//    the dest array is not defined, but the rest of the source will be
+//    processed.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest);
+LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest,
+                                                vector<string> *errors);
+
+// ----------------------------------------------------------------------
+// UnescapeCEscapeString()
+//    This does the same thing as UnescapeCEscapeSequences, but creates
+//    a new string. The caller does not need to worry about allocating
+//    a dest buffer. This should be used for non performance critical
+//    tasks such as printing debug messages. It is safe for src and dest
+//    to be the same.
+//
+//    The second call stores its errors in a supplied string vector.
+//    If the string vector pointer is NULL, it reports the errors with LOG().
+//
+//    In the first and second calls, the length of dest is returned. In
+//    the third call, the new string is returned.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest);
+LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest,
+                                             vector<string> *errors);
+LIBPROTOBUF_EXPORT string UnescapeCEscapeString(const string& src);
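+
+// Minimal sketch of the string-returning overload (the escaped source here
+// is the six characters \x41\n):
+//   string s = UnescapeCEscapeString("\\x41\\n");  // s == "A\n"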
+
+// ----------------------------------------------------------------------
+// CEscape()
+//    Escapes 'src' using C-style escape sequences and returns the resulting
+//    string.
+//
+//    Escaped chars: \n, \r, \t, ", ', \, and !isprint().
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT string CEscape(const string& src);
+
+// ----------------------------------------------------------------------
+// CEscapeAndAppend()
+//    Escapes 'src' using C-style escape sequences, and appends the escaped
+//    string to 'dest'.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void CEscapeAndAppend(StringPiece src, string* dest);
+
+namespace strings {
+// Like CEscape() but does not escape bytes with the upper bit set.
+LIBPROTOBUF_EXPORT string Utf8SafeCEscape(const string& src);
+
+// Like CEscape() but uses hex (\x) escapes instead of octals.
+LIBPROTOBUF_EXPORT string CHexEscape(const string& src);
+}  // namespace strings
+
+// ----------------------------------------------------------------------
+// strto32()
+// strtou32()
+// strto64()
+// strtou64()
+//    Architecture-neutral plug compatible replacements for strtol() and
+//    strtoul().  Longs have different lengths on ILP-32 and LP-64
+//    platforms, so using these is safer, from the point of view of
+//    overflow behavior, than using the standard libc functions.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int32 strto32_adaptor(const char *nptr, char **endptr,
+                                         int base);
+LIBPROTOBUF_EXPORT uint32 strtou32_adaptor(const char *nptr, char **endptr,
+                                           int base);
+
+inline int32 strto32(const char *nptr, char **endptr, int base) {
+  if (sizeof(int32) == sizeof(long))
+    return strtol(nptr, endptr, base);
+  else
+    return strto32_adaptor(nptr, endptr, base);
+}
+
+inline uint32 strtou32(const char *nptr, char **endptr, int base) {
+  if (sizeof(uint32) == sizeof(unsigned long))
+    return strtoul(nptr, endptr, base);
+  else
+    return strtou32_adaptor(nptr, endptr, base);
+}
+
+// For now, long long is 64-bit on all the platforms we care about, so these
+// functions can simply pass the call to strto[u]ll.
+inline int64 strto64(const char *nptr, char **endptr, int base) {
+  GOOGLE_COMPILE_ASSERT(sizeof(int64) == sizeof(long long),
+                        sizeof_int64_is_not_sizeof_long_long);
+  return strtoll(nptr, endptr, base);
+}
+
+inline uint64 strtou64(const char *nptr, char **endptr, int base) {
+  GOOGLE_COMPILE_ASSERT(sizeof(uint64) == sizeof(unsigned long long),
+                        sizeof_uint64_is_not_sizeof_long_long);
+  return strtoull(nptr, endptr, base);
+}
+
+// ----------------------------------------------------------------------
+// safe_strtob()
+// safe_strto32()
+// safe_strtou32()
+// safe_strto64()
+// safe_strtou64()
+// safe_strtof()
+// safe_strtod()
+//    Parse a string as the named type, returning true on success and
+//    false if the string could not be parsed.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT bool safe_strtob(StringPiece str, bool* value);
+
+LIBPROTOBUF_EXPORT bool safe_strto32(const string& str, int32* value);
+LIBPROTOBUF_EXPORT bool safe_strtou32(const string& str, uint32* value);
+inline bool safe_strto32(const char* str, int32* value) {
+  return safe_strto32(string(str), value);
+}
+inline bool safe_strto32(StringPiece str, int32* value) {
+  return safe_strto32(str.ToString(), value);
+}
+inline bool safe_strtou32(const char* str, uint32* value) {
+  return safe_strtou32(string(str), value);
+}
+inline bool safe_strtou32(StringPiece str, uint32* value) {
+  return safe_strtou32(str.ToString(), value);
+}
+
+LIBPROTOBUF_EXPORT bool safe_strto64(const string& str, int64* value);
+LIBPROTOBUF_EXPORT bool safe_strtou64(const string& str, uint64* value);
+inline bool safe_strto64(const char* str, int64* value) {
+  return safe_strto64(string(str), value);
+}
+inline bool safe_strto64(StringPiece str, int64* value) {
+  return safe_strto64(str.ToString(), value);
+}
+inline bool safe_strtou64(const char* str, uint64* value) {
+  return safe_strtou64(string(str), value);
+}
+inline bool safe_strtou64(StringPiece str, uint64* value) {
+  return safe_strtou64(str.ToString(), value);
+}
+
+LIBPROTOBUF_EXPORT bool safe_strtof(const char* str, float* value);
+LIBPROTOBUF_EXPORT bool safe_strtod(const char* str, double* value);
+inline bool safe_strtof(const string& str, float* value) {
+  return safe_strtof(str.c_str(), value);
+}
+inline bool safe_strtod(const string& str, double* value) {
+  return safe_strtod(str.c_str(), value);
+}
+inline bool safe_strtof(StringPiece str, float* value) {
+  return safe_strtof(str.ToString(), value);
+}
+inline bool safe_strtod(StringPiece str, double* value) {
+  return safe_strtod(str.ToString(), value);
+}
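+
+// Typical use:
+//   int32 port = 0;
+//   if (safe_strto32("8080", &port)) {
+//     // port == 8080
+//   }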
+
+// ----------------------------------------------------------------------
+// FastIntToBuffer()
+// FastHexToBuffer()
+// FastHex64ToBuffer()
+// FastHex32ToBuffer()
+// FastTimeToBuffer()
+//    These are intended for speed.  FastIntToBuffer() assumes the
+//    integer is non-negative.  FastHexToBuffer() puts output in
+//    hex rather than decimal.  FastTimeToBuffer() puts the output
+//    into RFC822 format.
+//
+//    FastHex64ToBuffer() puts a 64-bit unsigned value in hex-format,
+//    padded to exactly 16 bytes (plus one byte for '\0')
+//
+//    FastHex32ToBuffer() puts a 32-bit unsigned value in hex-format,
+//    padded to exactly 8 bytes (plus one byte for '\0')
+//
+//       All functions take the output buffer as an arg.
+//    They all return a pointer to the beginning of the output,
+//    which may not be the beginning of the input buffer.
+// ----------------------------------------------------------------------
+
+// Suggested buffer size for FastToBuffer functions.  Also works with
+// DoubleToBuffer() and FloatToBuffer().
+static const int kFastToBufferSize = 32;
+
+LIBPROTOBUF_EXPORT char* FastInt32ToBuffer(int32 i, char* buffer);
+LIBPROTOBUF_EXPORT char* FastInt64ToBuffer(int64 i, char* buffer);
+char* FastUInt32ToBuffer(uint32 i, char* buffer);  // inline below
+char* FastUInt64ToBuffer(uint64 i, char* buffer);  // inline below
+LIBPROTOBUF_EXPORT char* FastHexToBuffer(int i, char* buffer);
+LIBPROTOBUF_EXPORT char* FastHex64ToBuffer(uint64 i, char* buffer);
+LIBPROTOBUF_EXPORT char* FastHex32ToBuffer(uint32 i, char* buffer);
+
+// The output buffer must be at least 22 bytes long.
+inline char* FastIntToBuffer(int i, char* buffer) {
+  return (sizeof(i) == 4 ?
+          FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer));
+}
+inline char* FastUIntToBuffer(unsigned int i, char* buffer) {
+  return (sizeof(i) == 4 ?
+          FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer));
+}
+inline char* FastLongToBuffer(long i, char* buffer) {
+  return (sizeof(i) == 4 ?
+          FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer));
+}
+inline char* FastULongToBuffer(unsigned long i, char* buffer) {
+  return (sizeof(i) == 4 ?
+          FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer));
+}
+
+// ----------------------------------------------------------------------
+// FastInt32ToBufferLeft()
+// FastUInt32ToBufferLeft()
+// FastInt64ToBufferLeft()
+// FastUInt64ToBufferLeft()
+//
+// Like the Fast*ToBuffer() functions above, these are intended for speed.
+// Unlike the Fast*ToBuffer() functions, however, these functions write
+// their output to the beginning of the buffer (hence the name, as the
+// output is left-aligned).  The caller is responsible for ensuring that
+// the buffer has enough space to hold the output.
+//
+// Returns a pointer to the end of the string (i.e. the null character
+// terminating the string).
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT char* FastInt32ToBufferLeft(int32 i, char* buffer);
+LIBPROTOBUF_EXPORT char* FastUInt32ToBufferLeft(uint32 i, char* buffer);
+LIBPROTOBUF_EXPORT char* FastInt64ToBufferLeft(int64 i, char* buffer);
+LIBPROTOBUF_EXPORT char* FastUInt64ToBufferLeft(uint64 i, char* buffer);
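+
+// For example:
+//   char buf[kFastToBufferSize];
+//   char* end = FastInt32ToBufferLeft(-42, buf);
+//   // buf holds "-42"; end points at its terminating '\0', so end - buf == 3.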
+
+// Just define these in terms of the above.
+inline char* FastUInt32ToBuffer(uint32 i, char* buffer) {
+  FastUInt32ToBufferLeft(i, buffer);
+  return buffer;
+}
+inline char* FastUInt64ToBuffer(uint64 i, char* buffer) {
+  FastUInt64ToBufferLeft(i, buffer);
+  return buffer;
+}
+
+inline string SimpleBtoa(bool value) {
+  return value ? "true" : "false";
+}
+
+// ----------------------------------------------------------------------
+// SimpleItoa()
+//    Description: converts an integer to a string.
+//
+//    Return value: string
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT string SimpleItoa(int i);
+LIBPROTOBUF_EXPORT string SimpleItoa(unsigned int i);
+LIBPROTOBUF_EXPORT string SimpleItoa(long i);
+LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long i);
+LIBPROTOBUF_EXPORT string SimpleItoa(long long i);
+LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long long i);
+
+// ----------------------------------------------------------------------
+// SimpleDtoa()
+// SimpleFtoa()
+// DoubleToBuffer()
+// FloatToBuffer()
+//    Description: converts a double or float to a string which, if
+//    passed to NoLocaleStrtod(), will produce the exact same original double
+//    (except in case of NaN; all NaNs are considered the same value).
+//    We try to keep the string short but it's not guaranteed to be as
+//    short as possible.
+//
+//    DoubleToBuffer() and FloatToBuffer() write the text to the given
+//    buffer and return it.  The buffer must be at least
+//    kDoubleToBufferSize bytes for doubles and kFloatToBufferSize
+//    bytes for floats.  kFastToBufferSize is also guaranteed to be large
+//    enough to hold either.
+//
+//    Return value: string
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT string SimpleDtoa(double value);
+LIBPROTOBUF_EXPORT string SimpleFtoa(float value);
+
+LIBPROTOBUF_EXPORT char* DoubleToBuffer(double i, char* buffer);
+LIBPROTOBUF_EXPORT char* FloatToBuffer(float i, char* buffer);
+
+// In practice, doubles should never need more than 24 bytes and floats
+// should never need more than 14 (including the null terminator), but we
+// overestimate to be safe.
+static const int kDoubleToBufferSize = 32;
+static const int kFloatToBufferSize = 24;
+
+namespace strings {
+
+enum PadSpec {
+  NO_PAD = 1,
+  ZERO_PAD_2,
+  ZERO_PAD_3,
+  ZERO_PAD_4,
+  ZERO_PAD_5,
+  ZERO_PAD_6,
+  ZERO_PAD_7,
+  ZERO_PAD_8,
+  ZERO_PAD_9,
+  ZERO_PAD_10,
+  ZERO_PAD_11,
+  ZERO_PAD_12,
+  ZERO_PAD_13,
+  ZERO_PAD_14,
+  ZERO_PAD_15,
+  ZERO_PAD_16,
+};
+
+struct Hex {
+  uint64 value;
+  enum PadSpec spec;
+  template <class Int>
+  explicit Hex(Int v, PadSpec s = NO_PAD)
+      : spec(s) {
+    // Prevent sign-extension by casting integers to
+    // their unsigned counterparts.
+#ifdef LANG_CXX11
+    static_assert(
+        sizeof(v) == 1 || sizeof(v) == 2 || sizeof(v) == 4 || sizeof(v) == 8,
+        "Unknown integer type");
+#endif
+    value = sizeof(v) == 1 ? static_cast<uint8>(v)
+          : sizeof(v) == 2 ? static_cast<uint16>(v)
+          : sizeof(v) == 4 ? static_cast<uint32>(v)
+          : static_cast<uint64>(v);
+  }
+};
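+
+// Hex is consumed through the AlphaNum(Hex) constructor below, so it can be
+// passed straight to StrCat()/StrAppend().  Rough sketch (ZERO_PAD_4 pads to
+// at least four hex digits, lower case):
+//   StrCat("0x", Hex(255, ZERO_PAD_4))  // expected to yield "0x00ff"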
+
+struct LIBPROTOBUF_EXPORT AlphaNum {
+  const char *piece_data_;  // move these to string_ref eventually
+  size_t piece_size_;       // move these to string_ref eventually
+
+  char digits[kFastToBufferSize];
+
+  // No bool ctor -- bools convert to an integral type.
+  // A bool ctor would also convert incoming pointers (bletch).
+
+  AlphaNum(int32 i32)
+      : piece_data_(digits),
+        piece_size_(FastInt32ToBufferLeft(i32, digits) - &digits[0]) {}
+  AlphaNum(uint32 u32)
+      : piece_data_(digits),
+        piece_size_(FastUInt32ToBufferLeft(u32, digits) - &digits[0]) {}
+  AlphaNum(int64 i64)
+      : piece_data_(digits),
+        piece_size_(FastInt64ToBufferLeft(i64, digits) - &digits[0]) {}
+  AlphaNum(uint64 u64)
+      : piece_data_(digits),
+        piece_size_(FastUInt64ToBufferLeft(u64, digits) - &digits[0]) {}
+
+  AlphaNum(float f)
+    : piece_data_(digits), piece_size_(strlen(FloatToBuffer(f, digits))) {}
+  AlphaNum(double f)
+    : piece_data_(digits), piece_size_(strlen(DoubleToBuffer(f, digits))) {}
+
+  AlphaNum(Hex hex);
+
+  AlphaNum(const char* c_str)
+      : piece_data_(c_str), piece_size_(strlen(c_str)) {}
+  // TODO: Add a string_ref constructor, eventually
+  // AlphaNum(const StringPiece &pc) : piece(pc) {}
+
+  AlphaNum(const string& str)
+      : piece_data_(str.data()), piece_size_(str.size()) {}
+
+  AlphaNum(StringPiece str)
+      : piece_data_(str.data()), piece_size_(str.size()) {}
+
+  size_t size() const { return piece_size_; }
+  const char *data() const { return piece_data_; }
+
+ private:
+  // Use ":" not ':'; the char constructor is intentionally disallowed.
+  AlphaNum(char c);  // NOLINT(runtime/explicit)
+
+  // Disallow copy and assign.
+  AlphaNum(const AlphaNum&);
+  void operator=(const AlphaNum&);
+};
+
+}  // namespace strings
+
+using strings::AlphaNum;
+
+// ----------------------------------------------------------------------
+// StrCat()
+//    This merges the given strings or numbers, with no delimiter.  This
+//    is designed to be the fastest possible way to construct a string out
+//    of a mix of raw C strings, strings, bool values,
+//    and numeric values.
+//
+//    Don't use this for user-visible strings.  The localization process
+//    works poorly on strings built up out of fragments.
+//
+//    For clarity and performance, don't use StrCat when appending to a
+//    string.  In particular, avoid using any of these (anti-)patterns:
+//      str.append(StrCat(...))
+//      str += StrCat(...)
+//      str = StrCat(str, ...)
+//    where the last is the worst, with the potential to change a loop
+//    from a linear time operation with O(1) dynamic allocations into a
+//    quadratic time operation with O(n) dynamic allocations.  StrAppend
+//    is a better choice than any of the above, subject to the restriction
+//    of StrAppend(&str, a, b, c, ...) that none of the a, b, c, ... may
+//    be a reference into str.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f,
+                                 const AlphaNum& g);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f,
+                                 const AlphaNum& g, const AlphaNum& h);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f,
+                                 const AlphaNum& g, const AlphaNum& h,
+                                 const AlphaNum& i);
+
+inline string StrCat(const AlphaNum& a) { return string(a.data(), a.size()); }
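+
+// Minimal sketch; the integer argument goes through the AlphaNum(int32)
+// conversion above:
+//   string path = StrCat("/tmp/", "file_", 42, ".proto");
+//   // path == "/tmp/file_42.proto"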
+
+// ----------------------------------------------------------------------
+// StrAppend()
+//    Same as above, but adds the output to the given string.
+//    WARNING: For speed, StrAppend does not try to check each of its input
+//    arguments to be sure that they are not a subset of the string being
+//    appended to.  That is, while this will work:
+//
+//    string s = "foo";
+//    s += s;
+//
+//    This will not (necessarily) work:
+//
+//    string s = "foo";
+//    StrAppend(&s, s);
+//
+//    Note: while StrCat supports appending up to 9 arguments, StrAppend
+//    is currently limited to 4.  That's rarely an issue except when
+//    automatically transforming StrCat to StrAppend, and can easily be
+//    worked around as consecutive calls to StrAppend are quite efficient.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a);
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a,
+                                  const AlphaNum& b);
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a,
+                                  const AlphaNum& b, const AlphaNum& c);
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a,
+                                  const AlphaNum& b, const AlphaNum& c,
+                                  const AlphaNum& d);
+
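+// The loop rewrite suggested for StrCat() above looks like this (a minimal
+// sketch; num_items stands in for a caller-supplied count):
+//   string out;
+//   for (int i = 0; i < num_items; ++i) {
+//     StrAppend(&out, "item_", i, "\n");
+//   }
+// Each call appends in place, which keeps the loop linear in the total
+// output length.
+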
+// ----------------------------------------------------------------------
+// Join()
+//    These methods concatenate a range of components into a C++ string, using
+//    the C-string "delim" as a separator between components.
+// ----------------------------------------------------------------------
+template <typename Iterator>
+void Join(Iterator start, Iterator end,
+          const char* delim, string* result) {
+  for (Iterator it = start; it != end; ++it) {
+    if (it != start) {
+      result->append(delim);
+    }
+    StrAppend(result, *it);
+  }
+}
+
+template <typename Range>
+string Join(const Range& components,
+            const char* delim) {
+  string result;
+  Join(components.begin(), components.end(), delim, &result);
+  return result;
+}
+
+// ----------------------------------------------------------------------
+// ToHex()
+//    Return a lower-case hex string representation of the given integer.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT string ToHex(uint64 num);
+
+// ----------------------------------------------------------------------
+// GlobalReplaceSubstring()
+//    Replaces all instances of a substring in a string.  Does nothing
+//    if 'substring' is empty.  Returns the number of replacements.
+//
+//    NOTE: The string pieces must not overlap s.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int GlobalReplaceSubstring(const string& substring,
+                                              const string& replacement,
+                                              string* s);
+
+// ----------------------------------------------------------------------
+// Base64Unescape()
+//    Converts "src" which is encoded in Base64 to its binary equivalent and
+//    writes it to "dest". If src contains invalid characters, dest is cleared
+//    and the function returns false. Returns true on success.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT bool Base64Unescape(StringPiece src, string* dest);
+
+// ----------------------------------------------------------------------
+// WebSafeBase64Unescape()
+//    This is a variation of Base64Unescape which uses '-' instead of '+', and
+//    '_' instead of '/'. src is not null terminated; instead specify slen. I
+//    recommend that slen<szdest, but we honor szdest anyway.
+//    RETURNS the length of dest, or -1 if src contains invalid chars.
+
+//    The variation that stores into a string clears the string first, and
+//    returns false (with dest empty) if src contains invalid chars; for
+//    this version src and dest must be different strings.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int WebSafeBase64Unescape(const char* src, int slen,
+                                             char* dest, int szdest);
+LIBPROTOBUF_EXPORT bool WebSafeBase64Unescape(StringPiece src, string* dest);
+
+// Return the length to use for the output buffer given to the base64 escape
+// routines. Make sure to use the same value for do_padding in both.
+// This function may return incorrect results if given input_len values that
+// are extremely high, which should happen rarely.
+LIBPROTOBUF_EXPORT int CalculateBase64EscapedLen(int input_len,
+                                                 bool do_padding);
+// Use this version when calling Base64Escape without a do_padding arg.
+LIBPROTOBUF_EXPORT int CalculateBase64EscapedLen(int input_len);
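+
+// For example, 5 input bytes need 8 output characters with padding ("abcde"
+// encodes to "YWJjZGU=") and 7 without.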
+
+// ----------------------------------------------------------------------
+// Base64Escape()
+// WebSafeBase64Escape()
+//    Encode "src" to "dest" using base64 encoding.
+//    src is not null terminated, instead specify len.
+//    'dest' should have at least CalculateBase64EscapedLen() length.
+//    RETURNS the length of dest.
+//    The WebSafe variation uses '-' instead of '+' and '_' instead of '/'
+//    so that we can place the output in a URL or cookies without having
+//    to escape them.  It also has an extra parameter "do_padding",
+//    which when set to false will prevent padding with "=".
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int Base64Escape(const unsigned char* src, int slen,
+                                    char* dest, int szdest);
+LIBPROTOBUF_EXPORT int WebSafeBase64Escape(
+    const unsigned char* src, int slen, char* dest,
+    int szdest, bool do_padding);
+// Encode src into dest with padding.
+LIBPROTOBUF_EXPORT void Base64Escape(StringPiece src, string* dest);
+// Encode src into dest web-safely without padding.
+LIBPROTOBUF_EXPORT void WebSafeBase64Escape(StringPiece src, string* dest);
+// Encode src into dest web-safely with padding.
+LIBPROTOBUF_EXPORT void WebSafeBase64EscapeWithPadding(StringPiece src,
+                                                       string* dest);
+
+LIBPROTOBUF_EXPORT void Base64Escape(const unsigned char* src, int szsrc,
+                                     string* dest, bool do_padding);
+LIBPROTOBUF_EXPORT void WebSafeBase64Escape(const unsigned char* src, int szsrc,
+                                            string* dest, bool do_padding);
+
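+// Minimal sketch of the string-based overloads above:
+//   string padded, websafe;
+//   Base64Escape("abcde", &padded);          // padded  == "YWJjZGU="
+//   WebSafeBase64Escape("abcde", &websafe);  // websafe == "YWJjZGU" (no pad)
+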
+static const int UTFmax = 4;
+// ----------------------------------------------------------------------
+// EncodeAsUTF8Char()
+//  Helper to append a Unicode code point to a string as UTF8, without bringing
+//  in any external dependencies. The output buffer must be at least 4 bytes
+//  large.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int EncodeAsUTF8Char(uint32 code_point, char* output);
+
+// ----------------------------------------------------------------------
+// UTF8FirstLetterNumBytes()
+//   Length of the first UTF-8 character.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int UTF8FirstLetterNumBytes(const char* src, int len);
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
diff --git a/src/google/protobuf/stubs/strutil_unittest.cc b/src/google/protobuf/stubs/strutil_unittest.cc
new file mode 100644
index 0000000..5d62fc4
--- /dev/null
+++ b/src/google/protobuf/stubs/strutil_unittest.cc
@@ -0,0 +1,810 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+
+#include <google/protobuf/stubs/strutil.h>
+
+#include <locale.h>
+
+#include <google/protobuf/stubs/stl_util.h>
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+#ifdef _WIN32
+#define snprintf _snprintf
+#endif
+
+namespace google {
+namespace protobuf {
+namespace {
+
+// TODO(kenton):  Copy strutil tests from google3?
+
+TEST(StringUtilityTest, ImmuneToLocales) {
+  // Remember the old locale.
+  char* old_locale_cstr = setlocale(LC_NUMERIC, NULL);
+  ASSERT_TRUE(old_locale_cstr != NULL);
+  string old_locale = old_locale_cstr;
+
+  // Set the locale to "C".
+  ASSERT_TRUE(setlocale(LC_NUMERIC, "C") != NULL);
+
+  EXPECT_EQ("1.5", SimpleDtoa(1.5));
+  EXPECT_EQ("1.5", SimpleFtoa(1.5));
+
+  if (setlocale(LC_NUMERIC, "es_ES") == NULL &&
+      setlocale(LC_NUMERIC, "es_ES.utf8") == NULL) {
+    // Some systems may not have the desired locale available.
+    GOOGLE_LOG(WARNING)
+      << "Couldn't set locale to es_ES.  Skipping this test.";
+  } else {
+    EXPECT_EQ("1.5", SimpleDtoa(1.5));
+    EXPECT_EQ("1.5", SimpleFtoa(1.5));
+  }
+
+  // Return to original locale.
+  setlocale(LC_NUMERIC, old_locale.c_str());
+}
+
+#define EXPECT_EQ_ARRAY(len, x, y, msg)                     \
+  for (int j = 0; j < len; ++j) {                           \
+    EXPECT_EQ(x[j], y[j]) << "" # x << " != " # y           \
+                          << " byte " << j << ": " << msg;  \
+  }
+
+static struct {
+  int plain_length;
+  const char* plaintext;
+  const char* cyphertext;
+} base64_tests[] = {
+  // Empty string.
+  { 0, "", ""},
+
+  // Basic bit patterns;
+  // values obtained with "echo -n '...' | uuencode -m test"
+
+  { 1, "\000", "AA==" },
+  { 1, "\001", "AQ==" },
+  { 1, "\002", "Ag==" },
+  { 1, "\004", "BA==" },
+  { 1, "\010", "CA==" },
+  { 1, "\020", "EA==" },
+  { 1, "\040", "IA==" },
+  { 1, "\100", "QA==" },
+  { 1, "\200", "gA==" },
+
+  { 1, "\377", "/w==" },
+  { 1, "\376", "/g==" },
+  { 1, "\375", "/Q==" },
+  { 1, "\373", "+w==" },
+  { 1, "\367", "9w==" },
+  { 1, "\357", "7w==" },
+  { 1, "\337", "3w==" },
+  { 1, "\277", "vw==" },
+  { 1, "\177", "fw==" },
+  { 2, "\000\000", "AAA=" },
+  { 2, "\000\001", "AAE=" },
+  { 2, "\000\002", "AAI=" },
+  { 2, "\000\004", "AAQ=" },
+  { 2, "\000\010", "AAg=" },
+  { 2, "\000\020", "ABA=" },
+  { 2, "\000\040", "ACA=" },
+  { 2, "\000\100", "AEA=" },
+  { 2, "\000\200", "AIA=" },
+  { 2, "\001\000", "AQA=" },
+  { 2, "\002\000", "AgA=" },
+  { 2, "\004\000", "BAA=" },
+  { 2, "\010\000", "CAA=" },
+  { 2, "\020\000", "EAA=" },
+  { 2, "\040\000", "IAA=" },
+  { 2, "\100\000", "QAA=" },
+  { 2, "\200\000", "gAA=" },
+
+  { 2, "\377\377", "//8=" },
+  { 2, "\377\376", "//4=" },
+  { 2, "\377\375", "//0=" },
+  { 2, "\377\373", "//s=" },
+  { 2, "\377\367", "//c=" },
+  { 2, "\377\357", "/+8=" },
+  { 2, "\377\337", "/98=" },
+  { 2, "\377\277", "/78=" },
+  { 2, "\377\177", "/38=" },
+  { 2, "\376\377", "/v8=" },
+  { 2, "\375\377", "/f8=" },
+  { 2, "\373\377", "+/8=" },
+  { 2, "\367\377", "9/8=" },
+  { 2, "\357\377", "7/8=" },
+  { 2, "\337\377", "3/8=" },
+  { 2, "\277\377", "v/8=" },
+  { 2, "\177\377", "f/8=" },
+
+  { 3, "\000\000\000", "AAAA" },
+  { 3, "\000\000\001", "AAAB" },
+  { 3, "\000\000\002", "AAAC" },
+  { 3, "\000\000\004", "AAAE" },
+  { 3, "\000\000\010", "AAAI" },
+  { 3, "\000\000\020", "AAAQ" },
+  { 3, "\000\000\040", "AAAg" },
+  { 3, "\000\000\100", "AABA" },
+  { 3, "\000\000\200", "AACA" },
+  { 3, "\000\001\000", "AAEA" },
+  { 3, "\000\002\000", "AAIA" },
+  { 3, "\000\004\000", "AAQA" },
+  { 3, "\000\010\000", "AAgA" },
+  { 3, "\000\020\000", "ABAA" },
+  { 3, "\000\040\000", "ACAA" },
+  { 3, "\000\100\000", "AEAA" },
+  { 3, "\000\200\000", "AIAA" },
+  { 3, "\001\000\000", "AQAA" },
+  { 3, "\002\000\000", "AgAA" },
+  { 3, "\004\000\000", "BAAA" },
+  { 3, "\010\000\000", "CAAA" },
+  { 3, "\020\000\000", "EAAA" },
+  { 3, "\040\000\000", "IAAA" },
+  { 3, "\100\000\000", "QAAA" },
+  { 3, "\200\000\000", "gAAA" },
+
+  { 3, "\377\377\377", "////" },
+  { 3, "\377\377\376", "///+" },
+  { 3, "\377\377\375", "///9" },
+  { 3, "\377\377\373", "///7" },
+  { 3, "\377\377\367", "///3" },
+  { 3, "\377\377\357", "///v" },
+  { 3, "\377\377\337", "///f" },
+  { 3, "\377\377\277", "//+/" },
+  { 3, "\377\377\177", "//9/" },
+  { 3, "\377\376\377", "//7/" },
+  { 3, "\377\375\377", "//3/" },
+  { 3, "\377\373\377", "//v/" },
+  { 3, "\377\367\377", "//f/" },
+  { 3, "\377\357\377", "/+//" },
+  { 3, "\377\337\377", "/9//" },
+  { 3, "\377\277\377", "/7//" },
+  { 3, "\377\177\377", "/3//" },
+  { 3, "\376\377\377", "/v//" },
+  { 3, "\375\377\377", "/f//" },
+  { 3, "\373\377\377", "+///" },
+  { 3, "\367\377\377", "9///" },
+  { 3, "\357\377\377", "7///" },
+  { 3, "\337\377\377", "3///" },
+  { 3, "\277\377\377", "v///" },
+  { 3, "\177\377\377", "f///" },
+
+  // Random numbers: values obtained with
+  //
+  //  #! /bin/bash
+  //  dd bs=$1 count=1 if=/dev/random of=/tmp/bar.random
+  //  od -N $1 -t o1 /tmp/bar.random
+  //  uuencode -m test < /tmp/bar.random
+  //
+  // where $1 is the number of bytes (2, 3)
+
+  { 2, "\243\361", "o/E=" },
+  { 2, "\024\167", "FHc=" },
+  { 2, "\313\252", "y6o=" },
+  { 2, "\046\041", "JiE=" },
+  { 2, "\145\236", "ZZ4=" },
+  { 2, "\254\325", "rNU=" },
+  { 2, "\061\330", "Mdg=" },
+  { 2, "\245\032", "pRo=" },
+  { 2, "\006\000", "BgA=" },
+  { 2, "\375\131", "/Vk=" },
+  { 2, "\303\210", "w4g=" },
+  { 2, "\040\037", "IB8=" },
+  { 2, "\261\372", "sfo=" },
+  { 2, "\335\014", "3Qw=" },
+  { 2, "\233\217", "m48=" },
+  { 2, "\373\056", "+y4=" },
+  { 2, "\247\232", "p5o=" },
+  { 2, "\107\053", "Rys=" },
+  { 2, "\204\077", "hD8=" },
+  { 2, "\276\211", "vok=" },
+  { 2, "\313\110", "y0g=" },
+  { 2, "\363\376", "8/4=" },
+  { 2, "\251\234", "qZw=" },
+  { 2, "\103\262", "Q7I=" },
+  { 2, "\142\312", "Yso=" },
+  { 2, "\067\211", "N4k=" },
+  { 2, "\220\001", "kAE=" },
+  { 2, "\152\240", "aqA=" },
+  { 2, "\367\061", "9zE=" },
+  { 2, "\133\255", "W60=" },
+  { 2, "\176\035", "fh0=" },
+  { 2, "\032\231", "Gpk=" },
+
+  { 3, "\013\007\144", "Cwdk" },
+  { 3, "\030\112\106", "GEpG" },
+  { 3, "\047\325\046", "J9Um" },
+  { 3, "\310\160\022", "yHAS" },
+  { 3, "\131\100\237", "WUCf" },
+  { 3, "\064\342\134", "NOJc" },
+  { 3, "\010\177\004", "CH8E" },
+  { 3, "\345\147\205", "5WeF" },
+  { 3, "\300\343\360", "wOPw" },
+  { 3, "\061\240\201", "MaCB" },
+  { 3, "\225\333\044", "ldsk" },
+  { 3, "\215\137\352", "jV/q" },
+  { 3, "\371\147\160", "+Wdw" },
+  { 3, "\030\320\051", "GNAp" },
+  { 3, "\044\174\241", "JHyh" },
+  { 3, "\260\127\037", "sFcf" },
+  { 3, "\111\045\033", "SSUb" },
+  { 3, "\202\114\107", "gkxH" },
+  { 3, "\057\371\042", "L/ki" },
+  { 3, "\223\247\244", "k6ek" },
+  { 3, "\047\216\144", "J45k" },
+  { 3, "\203\070\327", "gzjX" },
+  { 3, "\247\140\072", "p2A6" },
+  { 3, "\124\115\116", "VE1O" },
+  { 3, "\157\162\050", "b3Io" },
+  { 3, "\357\223\004", "75ME" },
+  { 3, "\052\117\156", "Kk9u" },
+  { 3, "\347\154\000", "52wA" },
+  { 3, "\303\012\142", "wwpi" },
+  { 3, "\060\035\362", "MB3y" },
+  { 3, "\130\226\361", "WJbx" },
+  { 3, "\173\013\071", "ews5" },
+  { 3, "\336\004\027", "3gQX" },
+  { 3, "\357\366\234", "7/ac" },
+  { 3, "\353\304\111", "68RJ" },
+  { 3, "\024\264\131", "FLRZ" },
+  { 3, "\075\114\251", "PUyp" },
+  { 3, "\315\031\225", "zRmV" },
+  { 3, "\154\201\276", "bIG+" },
+  { 3, "\200\066\072", "gDY6" },
+  { 3, "\142\350\267", "Yui3" },
+  { 3, "\033\000\166", "GwB2" },
+  { 3, "\210\055\077", "iC0/" },
+  { 3, "\341\037\124", "4R9U" },
+  { 3, "\161\103\152", "cUNq" },
+  { 3, "\270\142\131", "uGJZ" },
+  { 3, "\337\076\074", "3z48" },
+  { 3, "\375\106\362", "/Uby" },
+  { 3, "\227\301\127", "l8FX" },
+  { 3, "\340\002\234", "4AKc" },
+  { 3, "\121\064\033", "UTQb" },
+  { 3, "\157\134\143", "b1xj" },
+  { 3, "\247\055\327", "py3X" },
+  { 3, "\340\142\005", "4GIF" },
+  { 3, "\060\260\143", "MLBj" },
+  { 3, "\075\203\170", "PYN4" },
+  { 3, "\143\160\016", "Y3AO" },
+  { 3, "\313\013\063", "ywsz" },
+  { 3, "\174\236\135", "fJ5d" },
+  { 3, "\103\047\026", "QycW" },
+  { 3, "\365\005\343", "9QXj" },
+  { 3, "\271\160\223", "uXCT" },
+  { 3, "\362\255\172", "8q16" },
+  { 3, "\113\012\015", "SwoN" },
+
+  // various lengths, generated by this python script:
+  //
+  // from string import lowercase as lc
+  // for i in range(27):
+  //   print '{ %2d, "%s",%s "%s" },' % (i, lc[:i], ' ' * (26-i),
+  //                                     lc[:i].encode('base64').strip())
+
+  {  0, "",                           "" },
+  {  1, "a",                          "YQ==" },
+  {  2, "ab",                         "YWI=" },
+  {  3, "abc",                        "YWJj" },
+  {  4, "abcd",                       "YWJjZA==" },
+  {  5, "abcde",                      "YWJjZGU=" },
+  {  6, "abcdef",                     "YWJjZGVm" },
+  {  7, "abcdefg",                    "YWJjZGVmZw==" },
+  {  8, "abcdefgh",                   "YWJjZGVmZ2g=" },
+  {  9, "abcdefghi",                  "YWJjZGVmZ2hp" },
+  { 10, "abcdefghij",                 "YWJjZGVmZ2hpag==" },
+  { 11, "abcdefghijk",                "YWJjZGVmZ2hpams=" },
+  { 12, "abcdefghijkl",               "YWJjZGVmZ2hpamts" },
+  { 13, "abcdefghijklm",              "YWJjZGVmZ2hpamtsbQ==" },
+  { 14, "abcdefghijklmn",             "YWJjZGVmZ2hpamtsbW4=" },
+  { 15, "abcdefghijklmno",            "YWJjZGVmZ2hpamtsbW5v" },
+  { 16, "abcdefghijklmnop",           "YWJjZGVmZ2hpamtsbW5vcA==" },
+  { 17, "abcdefghijklmnopq",          "YWJjZGVmZ2hpamtsbW5vcHE=" },
+  { 18, "abcdefghijklmnopqr",         "YWJjZGVmZ2hpamtsbW5vcHFy" },
+  { 19, "abcdefghijklmnopqrs",        "YWJjZGVmZ2hpamtsbW5vcHFycw==" },
+  { 20, "abcdefghijklmnopqrst",       "YWJjZGVmZ2hpamtsbW5vcHFyc3Q=" },
+  { 21, "abcdefghijklmnopqrstu",      "YWJjZGVmZ2hpamtsbW5vcHFyc3R1" },
+  { 22, "abcdefghijklmnopqrstuv",     "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dg==" },
+  { 23, "abcdefghijklmnopqrstuvw",    "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnc=" },
+  { 24, "abcdefghijklmnopqrstuvwx",   "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4" },
+  { 25, "abcdefghijklmnopqrstuvwxy",  "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eQ==" },
+  { 26, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo=" },
+};
+
+static struct {
+  const char* plaintext;
+  const char* cyphertext;
+} base64_strings[] = {
+  // Some google quotes
+  // Cyphertext created with "uuencode (GNU sharutils) 4.6.3"
+  // (Note that we're testing the websafe encoding, though, so if
+  // you add messages, be sure to run "tr -- '+/' '-_'" on the output)
+  { "I was always good at math and science, and I never realized "
+    "that was unusual or somehow undesirable. So one of the things "
+    "I care a lot about is helping to remove that stigma, "
+    "to show girls that you can be feminine, you can like the things "
+    "that girls like, but you can also be really good at technology. "
+    "You can be really good at building things."
+    " - Marissa Meyer, Newsweek, 2010-12-22" "\n",
+
+    "SSB3YXMgYWx3YXlzIGdvb2QgYXQgbWF0aCBhbmQgc2NpZW5jZSwgYW5kIEkg"
+    "bmV2ZXIgcmVhbGl6ZWQgdGhhdCB3YXMgdW51c3VhbCBvciBzb21laG93IHVu"
+    "ZGVzaXJhYmxlLiBTbyBvbmUgb2YgdGhlIHRoaW5ncyBJIGNhcmUgYSBsb3Qg"
+    "YWJvdXQgaXMgaGVscGluZyB0byByZW1vdmUgdGhhdCBzdGlnbWEsIHRvIHNo"
+    "b3cgZ2lybHMgdGhhdCB5b3UgY2FuIGJlIGZlbWluaW5lLCB5b3UgY2FuIGxp"
+    "a2UgdGhlIHRoaW5ncyB0aGF0IGdpcmxzIGxpa2UsIGJ1dCB5b3UgY2FuIGFs"
+    "c28gYmUgcmVhbGx5IGdvb2QgYXQgdGVjaG5vbG9neS4gWW91IGNhbiBiZSBy"
+    "ZWFsbHkgZ29vZCBhdCBidWlsZGluZyB0aGluZ3MuIC0gTWFyaXNzYSBNZXll"
+    "ciwgTmV3c3dlZWssIDIwMTAtMTItMjIK" },
+
+  { "Typical first year for a new cluster: "
+    "~0.5 overheating "
+    "~1 PDU failure "
+    "~1 rack-move "
+    "~1 network rewiring "
+    "~20 rack failures "
+    "~5 racks go wonky "
+    "~8 network maintenances "
+    "~12 router reloads "
+    "~3 router failures "
+    "~dozens of minor 30-second blips for dns "
+    "~1000 individual machine failures "
+    "~thousands of hard drive failures "
+    "slow disks, bad memory, misconfigured machines, flaky machines, etc."
+    " - Jeff Dean, The Joys of Real Hardware" "\n",
+
+    "VHlwaWNhbCBmaXJzdCB5ZWFyIGZvciBhIG5ldyBjbHVzdGVyOiB-MC41IG92"
+    "ZXJoZWF0aW5nIH4xIFBEVSBmYWlsdXJlIH4xIHJhY2stbW92ZSB-MSBuZXR3"
+    "b3JrIHJld2lyaW5nIH4yMCByYWNrIGZhaWx1cmVzIH41IHJhY2tzIGdvIHdv"
+    "bmt5IH44IG5ldHdvcmsgbWFpbnRlbmFuY2VzIH4xMiByb3V0ZXIgcmVsb2Fk"
+    "cyB-MyByb3V0ZXIgZmFpbHVyZXMgfmRvemVucyBvZiBtaW5vciAzMC1zZWNv"
+    "bmQgYmxpcHMgZm9yIGRucyB-MTAwMCBpbmRpdmlkdWFsIG1hY2hpbmUgZmFp"
+    "bHVyZXMgfnRob3VzYW5kcyBvZiBoYXJkIGRyaXZlIGZhaWx1cmVzIHNsb3cg"
+    "ZGlza3MsIGJhZCBtZW1vcnksIG1pc2NvbmZpZ3VyZWQgbWFjaGluZXMsIGZs"
+    "YWt5IG1hY2hpbmVzLCBldGMuIC0gSmVmZiBEZWFuLCBUaGUgSm95cyBvZiBS"
+    "ZWFsIEhhcmR3YXJlCg" },
+
+  { "I'm the head of the webspam team at Google.  "
+    "That means that if you type your name into Google and get porn back, "
+    "it's my fault. Unless you're a porn star, in which case porn is a "
+    "completely reasonable response."
+    " - Matt Cutts, Google Plus" "\n",
+
+    "SSdtIHRoZSBoZWFkIG9mIHRoZSB3ZWJzcGFtIHRlYW0gYXQgR29vZ2xlLiAg"
+    "VGhhdCBtZWFucyB0aGF0IGlmIHlvdSB0eXBlIHlvdXIgbmFtZSBpbnRvIEdv"
+    "b2dsZSBhbmQgZ2V0IHBvcm4gYmFjaywgaXQncyBteSBmYXVsdC4gVW5sZXNz"
+    "IHlvdSdyZSBhIHBvcm4gc3RhciwgaW4gd2hpY2ggY2FzZSBwb3JuIGlzIGEg"
+    "Y29tcGxldGVseSByZWFzb25hYmxlIHJlc3BvbnNlLiAtIE1hdHQgQ3V0dHMs"
+    "IEdvb2dsZSBQbHVzCg" },
+
+  { "It will still be a long time before machines approach human intelligence. "
+    "But luckily, machines don't actually have to be intelligent; "
+    "they just have to fake it. Access to a wealth of information, "
+    "combined with a rudimentary decision-making capacity, "
+    "can often be almost as useful. Of course, the results are better yet "
+    "when coupled with intelligence. A reference librarian with access to "
+    "a good search engine is a formidable tool."
+    " - Craig Silverstein, Siemens Pictures of the Future, Spring 2004" "\n",
+
+    "SXQgd2lsbCBzdGlsbCBiZSBhIGxvbmcgdGltZSBiZWZvcmUgbWFjaGluZXMg"
+    "YXBwcm9hY2ggaHVtYW4gaW50ZWxsaWdlbmNlLiBCdXQgbHVja2lseSwgbWFj"
+    "aGluZXMgZG9uJ3QgYWN0dWFsbHkgaGF2ZSB0byBiZSBpbnRlbGxpZ2VudDsg"
+    "dGhleSBqdXN0IGhhdmUgdG8gZmFrZSBpdC4gQWNjZXNzIHRvIGEgd2VhbHRo"
+    "IG9mIGluZm9ybWF0aW9uLCBjb21iaW5lZCB3aXRoIGEgcnVkaW1lbnRhcnkg"
+    "ZGVjaXNpb24tbWFraW5nIGNhcGFjaXR5LCBjYW4gb2Z0ZW4gYmUgYWxtb3N0"
+    "IGFzIHVzZWZ1bC4gT2YgY291cnNlLCB0aGUgcmVzdWx0cyBhcmUgYmV0dGVy"
+    "IHlldCB3aGVuIGNvdXBsZWQgd2l0aCBpbnRlbGxpZ2VuY2UuIEEgcmVmZXJl"
+    "bmNlIGxpYnJhcmlhbiB3aXRoIGFjY2VzcyB0byBhIGdvb2Qgc2VhcmNoIGVu"
+    "Z2luZSBpcyBhIGZvcm1pZGFibGUgdG9vbC4gLSBDcmFpZyBTaWx2ZXJzdGVp"
+    "biwgU2llbWVucyBQaWN0dXJlcyBvZiB0aGUgRnV0dXJlLCBTcHJpbmcgMjAw"
+    "NAo" },
+
+  // Degenerate edge case
+  { "",
+    "" },
+};
+
+TEST(Base64, EscapeAndUnescape) {
+  // Check the short strings; this tests the math (and boundaries)
+  for (int i = 0; i < sizeof(base64_tests) / sizeof(base64_tests[0]); ++i) {
+    char encode_buffer[100];
+    int encode_length;
+    char decode_buffer[100];
+    int decode_length;
+    int cypher_length;
+    string decode_str;
+
+    const unsigned char* unsigned_plaintext =
+      reinterpret_cast<const unsigned char*>(base64_tests[i].plaintext);
+
+    StringPiece plaintext(base64_tests[i].plaintext,
+                          base64_tests[i].plain_length);
+
+    cypher_length = strlen(base64_tests[i].cyphertext);
+
+    // The basic escape function:
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = Base64Escape(unsigned_plaintext,
+                                 base64_tests[i].plain_length,
+                                 encode_buffer,
+                                 sizeof(encode_buffer));
+    //    Is it of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+    // Would it have been okay to allocate only CalculateBase64EscapeLen()?
+    EXPECT_EQ(CalculateBase64EscapedLen(base64_tests[i].plain_length),
+              encode_length);
+
+    //    Is it the expected encoded value?
+    ASSERT_STREQ(encode_buffer, base64_tests[i].cyphertext);
+
+    // If we encode it into a buffer of exactly the right length...
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = Base64Escape(unsigned_plaintext,
+                                          base64_tests[i].plain_length,
+                                          encode_buffer,
+                                          cypher_length);
+    //    Is it still of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+
+    //    And is the value still correct?  (i.e., not losing the last byte)
+    EXPECT_STREQ(encode_buffer, base64_tests[i].cyphertext);
+
+    // If we decode it back:
+    decode_str.clear();
+    EXPECT_TRUE(Base64Unescape(
+        StringPiece(encode_buffer, cypher_length), &decode_str));
+
+    //    Is it of the expected length?
+    EXPECT_EQ(base64_tests[i].plain_length, decode_str.length());
+
+    //    Is it the expected decoded value?
+    EXPECT_EQ(plaintext, decode_str);
+
+    // Let's try with a pre-populated string.
+    string encoded("this junk should be ignored");
+    Base64Escape(string(base64_tests[i].plaintext,
+                        base64_tests[i].plain_length),
+                 &encoded);
+    EXPECT_EQ(encoded, string(encode_buffer, cypher_length));
+
+    string decoded("this junk should be ignored");
+    EXPECT_TRUE(Base64Unescape(
+        StringPiece(encode_buffer, cypher_length), &decoded));
+    EXPECT_EQ(decoded.size(), base64_tests[i].plain_length);
+    EXPECT_EQ_ARRAY(decoded.size(), decoded, base64_tests[i].plaintext, i);
+
+    // Our decoder treats the padding '=' characters at the end as
+    // optional (but if there are any, there must be the correct
+    // number of them.)  If encode_buffer has any, run some additional
+    // tests that fiddle with them.
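+    // (For example, "a" encodes to "YQ==" in the table above; the decoder
+    // accepts both "YQ==" and "YQ", but rejects a lone "YQ=".)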
+    char* first_equals = strchr(encode_buffer, '=');
+    if (first_equals) {
+      // How many padding '=' characters does the string end with?
+      int equals = (*(first_equals+1) == '=') ? 2 : 1;
+
+      // Try chopping off the equals sign(s) entirely.  The decoder
+      // should still be okay with this.
+      string decoded2("this junk should also be ignored");
+      *first_equals = '\0';
+      EXPECT_TRUE(Base64Unescape(
+          StringPiece(encode_buffer, first_equals - encode_buffer), &decoded2));
+      EXPECT_EQ(decoded.size(), base64_tests[i].plain_length);
+      EXPECT_EQ_ARRAY(decoded.size(), decoded, base64_tests[i].plaintext, i);
+
+      // Now test chopping off the equals sign(s) and adding
+      // whitespace.  Our decoder should still accept this.
+      decoded2.assign("this junk should be ignored");
+      *first_equals = ' ';
+      *(first_equals+1) = '\0';
+      EXPECT_TRUE(Base64Unescape(
+          StringPiece(encode_buffer, first_equals - encode_buffer + 1),
+          &decoded2));
+      EXPECT_EQ(decoded.size(), base64_tests[i].plain_length);
+      EXPECT_EQ_ARRAY(decoded.size(), decoded, base64_tests[i].plaintext, i);
+
+      // Now stick a bad character at the end of the string.  The decoder
+      // should refuse this string.
+      decoded2.assign("this junk should be ignored");
+      *first_equals = '?';
+      *(first_equals+1) = '\0';
+      EXPECT_TRUE(
+          !Base64Unescape(
+              StringPiece(encode_buffer, first_equals - encode_buffer + 1),
+              &decoded2));
+
+      int len;
+
+      // Test whitespace mixed with the padding.  (e.g. "AA = = ")  The
+      // decoder should accept this.
+      if (equals == 2) {
+        snprintf(first_equals, 6, " = = ");
+        len = first_equals - encode_buffer + 5;
+      } else {
+        snprintf(first_equals, 6, " = ");
+        len = first_equals - encode_buffer + 3;
+      }
+      decoded2.assign("this junk should be ignored");
+      EXPECT_TRUE(
+          Base64Unescape(StringPiece(encode_buffer, len), &decoded2));
+      EXPECT_EQ(decoded.size(), base64_tests[i].plain_length);
+      EXPECT_EQ_ARRAY(decoded.size(), decoded, base64_tests[i].plaintext, i);
+
+      // Test whitespace mixed with the padding, but with the wrong
+      // number of equals signs (e.g. "AA = ").  The decoder should
+      // refuse these strings.
+      if (equals == 1) {
+        snprintf(first_equals, 6, " = = ");
+        len = first_equals - encode_buffer + 5;
+      } else {
+        snprintf(first_equals, 6, " = ");
+        len = first_equals - encode_buffer + 3;
+      }
+      EXPECT_TRUE(
+          !Base64Unescape(StringPiece(encode_buffer, len), &decoded2));
+    }
+
+    // Cool! The basic Base64 encoder/decoder works.
+    // Let's try the alternate alphabet: tr -- '+/' '-_'
+
+    char websafe[100];
+    memset(websafe, 0, sizeof(websafe));
+    strncpy(websafe, base64_tests[i].cyphertext, cypher_length);
+    for (int c = 0; c < sizeof(websafe); ++c) {
+      if ('+' == websafe[c]) { websafe[c] = '-'; }
+      if ('/' == websafe[c]) { websafe[c] = '_'; }
+    }
+
+    // The websafe escape function:
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = WebSafeBase64Escape(unsigned_plaintext,
+                                                 base64_tests[i].plain_length,
+                                                 encode_buffer,
+                                                 sizeof(encode_buffer),
+                                                 true);
+    //    Is it of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+    EXPECT_EQ(
+        CalculateBase64EscapedLen(base64_tests[i].plain_length, true),
+        encode_length);
+
+    //    Is it the expected encoded value?
+    EXPECT_STREQ(encode_buffer, websafe);
+
+    //    If we encode it into a buffer of exactly the right length...
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = WebSafeBase64Escape(unsigned_plaintext,
+                                                 base64_tests[i].plain_length,
+                                                 encode_buffer,
+                                                 cypher_length,
+                                                 true);
+    //    Is it still of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+
+    //    And is the value still correct?  (i.e., not losing the last byte)
+    EXPECT_STREQ(encode_buffer, websafe);
+
+    //    Let's try the string version of the encoder
+    encoded = "this junk should be ignored";
+    WebSafeBase64Escape(
+        unsigned_plaintext, base64_tests[i].plain_length,
+        &encoded, true);
+    EXPECT_EQ(encoded.size(), cypher_length);
+    EXPECT_STREQ(encoded.c_str(), websafe);
+
+    //    If we decode it back:
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = WebSafeBase64Unescape(encode_buffer,
+                                                   cypher_length,
+                                                   decode_buffer,
+                                                   sizeof(decode_buffer));
+
+    //    Is it of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    //    Is it the expected decoded value?
+    EXPECT_EQ(0,
+              memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+    //    If we decode it into a buffer of exactly the right length...
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = WebSafeBase64Unescape(encode_buffer,
+                                                   cypher_length,
+                                                   decode_buffer,
+                                                   decode_length);
+
+    //    Is it still of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    //    And is it the expected decoded value?
+    EXPECT_EQ(0,
+              memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+    // Try using '.' for the pad character.
+    for (int c = cypher_length - 1; c >= 0 && '=' == encode_buffer[c]; --c) {
+      encode_buffer[c] = '.';
+    }
+
+    // If we decode it back:
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = WebSafeBase64Unescape(encode_buffer,
+                                                   cypher_length,
+                                                   decode_buffer,
+                                                   sizeof(decode_buffer));
+
+    // Is it of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    // Is it the expected decoded value?
+    EXPECT_EQ(0,
+              memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+    // If we decode it into a buffer of exactly the right length...
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = WebSafeBase64Unescape(encode_buffer,
+                                                   cypher_length,
+                                                   decode_buffer,
+                                                   decode_length);
+
+    // Is it still of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    // And is it the expected decoded value?
+    EXPECT_EQ(0,
+              memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+    // Let's try the string version of the decoder
+    decoded = "this junk should be ignored";
+    EXPECT_TRUE(WebSafeBase64Unescape(
+        StringPiece(encode_buffer, cypher_length), &decoded));
+    EXPECT_EQ(decoded.size(), base64_tests[i].plain_length);
+    EXPECT_EQ_ARRAY(decoded.size(), decoded, base64_tests[i].plaintext, i);
+
+    // Okay! The websafe Base64 encoder/decoder works.
+    // Let's try the unpadded version
+
+    for (int c = 0; c < sizeof(websafe); ++c) {
+      if ('=' == websafe[c]) {
+        websafe[c] = '\0';
+        cypher_length = c;
+        break;
+      }
+    }
+
+    // The websafe escape function:
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = WebSafeBase64Escape(unsigned_plaintext,
+                                                 base64_tests[i].plain_length,
+                                                 encode_buffer,
+                                                 sizeof(encode_buffer),
+                                                 false);
+    //    Is it of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+    EXPECT_EQ(
+        CalculateBase64EscapedLen(base64_tests[i].plain_length, false),
+        encode_length);
+
+    //    Is it the expected encoded value?
+    EXPECT_STREQ(encode_buffer, websafe);
+
+    //    If we encode it into a buffer of exactly the right length...
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = WebSafeBase64Escape(unsigned_plaintext,
+                                                 base64_tests[i].plain_length,
+                                                 encode_buffer,
+                                                 cypher_length,
+                                                 false);
+    //    Is it still of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+
+    //    And is the value still correct?  (i.e., not losing the last byte)
+    EXPECT_STREQ(encode_buffer, websafe);
+
+    // Let's try the (other) string version of the encoder
+    string plain(base64_tests[i].plaintext, base64_tests[i].plain_length);
+    encoded = "this junk should be ignored";
+    WebSafeBase64Escape(plain, &encoded);
+    EXPECT_EQ(encoded.size(), cypher_length);
+    EXPECT_STREQ(encoded.c_str(), websafe);
+
+    //    If we decode it back:
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = WebSafeBase64Unescape(encode_buffer,
+                                                   cypher_length,
+                                                   decode_buffer,
+                                                   sizeof(decode_buffer));
+
+    //    Is it of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    //    Is it the expected decoded value?
+    EXPECT_EQ(0,
+              memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+    //    If we decode it into a buffer of exactly the right length...
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = WebSafeBase64Unescape(encode_buffer,
+                                                   cypher_length,
+                                                   decode_buffer,
+                                                   decode_length);
+
+    //    Is it still of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    //    And is it the expected decoded value?
+    EXPECT_EQ(0,
+              memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+
+    // Let's try the string version of the decoder
+    decoded = "this junk should be ignored";
+    EXPECT_TRUE(WebSafeBase64Unescape(
+        StringPiece(encode_buffer, cypher_length), &decoded));
+    EXPECT_EQ(decoded.size(), base64_tests[i].plain_length);
+    EXPECT_EQ_ARRAY(decoded.size(), decoded, base64_tests[i].plaintext, i);
+
+    // This value works.  Try the next.
+  }
+
+  // Now try the long strings; this tests the streaming.
+  for (int i = 0; i < sizeof(base64_strings) / sizeof(base64_strings[0]);
+       ++i) {
+    const unsigned char* unsigned_plaintext =
+      reinterpret_cast<const unsigned char*>(base64_strings[i].plaintext);
+    int plain_length = strlen(base64_strings[i].plaintext);
+    int cypher_length = strlen(base64_strings[i].cyphertext);
+    vector<char> buffer(cypher_length+1);
+    int encode_length = WebSafeBase64Escape(unsigned_plaintext,
+                                                     plain_length,
+                                                     &buffer[0],
+                                                     buffer.size(),
+                                                     false);
+    EXPECT_EQ(cypher_length, encode_length);
+    EXPECT_EQ(
+        CalculateBase64EscapedLen(plain_length, false), encode_length);
+    buffer[ encode_length ] = '\0';
+    EXPECT_STREQ(base64_strings[i].cyphertext, &buffer[0]);
+  }
+
+  // Verify the behavior when decoding bad data
+  {
+    const char* bad_data = "ab-/";
+    string buf;
+    EXPECT_FALSE(Base64Unescape(StringPiece(bad_data), &buf));
+    EXPECT_TRUE(!WebSafeBase64Unescape(bad_data, &buf));
+    EXPECT_TRUE(buf.empty());
+  }
+}
+
+}  // anonymous namespace
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/substitute.cc b/src/google/protobuf/stubs/substitute.cc
new file mode 100644
index 0000000..c9d9589
--- /dev/null
+++ b/src/google/protobuf/stubs/substitute.cc
@@ -0,0 +1,134 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+
+#include <google/protobuf/stubs/substitute.h>
+#include <google/protobuf/stubs/strutil.h>
+#include <google/protobuf/stubs/stl_util.h>
+
+namespace google {
+namespace protobuf {
+namespace strings {
+
+using internal::SubstituteArg;
+
+// Returns the number of args in args_array which were passed explicitly
+// to Substitute().
+static int CountSubstituteArgs(const SubstituteArg* const* args_array) {
+  int count = 0;
+  while (args_array[count] != NULL && args_array[count]->size() != -1) {
+    ++count;
+  }
+  return count;
+}
+
+string Substitute(
+    const char* format,
+    const SubstituteArg& arg0, const SubstituteArg& arg1,
+    const SubstituteArg& arg2, const SubstituteArg& arg3,
+    const SubstituteArg& arg4, const SubstituteArg& arg5,
+    const SubstituteArg& arg6, const SubstituteArg& arg7,
+    const SubstituteArg& arg8, const SubstituteArg& arg9) {
+  string result;
+  SubstituteAndAppend(&result, format, arg0, arg1, arg2, arg3, arg4,
+                                       arg5, arg6, arg7, arg8, arg9);
+  return result;
+}
+
+void SubstituteAndAppend(
+    string* output, const char* format,
+    const SubstituteArg& arg0, const SubstituteArg& arg1,
+    const SubstituteArg& arg2, const SubstituteArg& arg3,
+    const SubstituteArg& arg4, const SubstituteArg& arg5,
+    const SubstituteArg& arg6, const SubstituteArg& arg7,
+    const SubstituteArg& arg8, const SubstituteArg& arg9) {
+  const SubstituteArg* const args_array[] = {
+    &arg0, &arg1, &arg2, &arg3, &arg4, &arg5, &arg6, &arg7, &arg8, &arg9, NULL
+  };
+
+  // Determine total size needed.
+  int size = 0;
+  for (int i = 0; format[i] != '\0'; i++) {
+    if (format[i] == '$') {
+      if (ascii_isdigit(format[i+1])) {
+        int index = format[i+1] - '0';
+        if (args_array[index]->size() == -1) {
+          GOOGLE_LOG(DFATAL)
+            << "strings::Substitute format string invalid: asked for \"$"
+            << index << "\", but only " << CountSubstituteArgs(args_array)
+            << " args were given.  Full format string was: \""
+            << CEscape(format) << "\".";
+          return;
+        }
+        size += args_array[index]->size();
+        ++i;  // Skip next char.
+      } else if (format[i+1] == '$') {
+        ++size;
+        ++i;  // Skip next char.
+      } else {
+        GOOGLE_LOG(DFATAL)
+          << "Invalid strings::Substitute() format string: \""
+          << CEscape(format) << "\".";
+        return;
+      }
+    } else {
+      ++size;
+    }
+  }
+
+  if (size == 0) return;
+
+  // Build the string.
+  int original_size = output->size();
+  STLStringResizeUninitialized(output, original_size + size);
+  char* target = string_as_array(output) + original_size;
+  for (int i = 0; format[i] != '\0'; i++) {
+    if (format[i] == '$') {
+      if (ascii_isdigit(format[i+1])) {
+        const SubstituteArg* src = args_array[format[i+1] - '0'];
+        memcpy(target, src->data(), src->size());
+        target += src->size();
+        ++i;  // Skip next char.
+      } else if (format[i+1] == '$') {
+        *target++ = '$';
+        ++i;  // Skip next char.
+      }
+    } else {
+      *target++ = format[i];
+    }
+  }
+
+  GOOGLE_DCHECK_EQ(target - output->data(), output->size());
+}
+
+}  // namespace strings
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/substitute.h b/src/google/protobuf/stubs/substitute.h
new file mode 100644
index 0000000..7ee442a
--- /dev/null
+++ b/src/google/protobuf/stubs/substitute.h
@@ -0,0 +1,170 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// from google3/strings/substitute.h
+
+#include <string>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/strutil.h>
+
+#ifndef GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
+#define GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
+
+namespace google {
+namespace protobuf {
+namespace strings {
+
+// ----------------------------------------------------------------------
+// strings::Substitute()
+// strings::SubstituteAndAppend()
+//   Kind of like StringPrintf, but different.
+//
+//   Example:
+//     string GetMessage(string first_name, string last_name, int age) {
+//       return strings::Substitute("My name is $0 $1 and I am $2 years old.",
+//                                  first_name, last_name, age);
+//     }
+//
+//   Differences from StringPrintf:
+//   * The format string does not identify the types of arguments.
+//     Instead, the magic of C++ deals with this for us.  See below
+//     for a list of accepted types.
+//   * Substitutions in the format string are identified by a '$'
+//     followed by a digit.  So, you can use arguments out-of-order and
+//     use the same argument multiple times.
+//   * It's much faster than StringPrintf.
+//
+//   Supported types:
+//   * Strings (const char*, const string&)
+//     * Note that this means you do not have to add .c_str() to all of
+//       your strings.  In fact, you shouldn't; it will be slower.
+//   * int32, int64, uint32, uint64:  Formatted using SimpleItoa().
+//   * float, double:  Formatted using SimpleFtoa() and SimpleDtoa().
+//   * bool:  Printed as "true" or "false".
+//
+//   SubstituteAndAppend() is like Substitute() but appends the result to
+//   *output.  Example:
+//
+//     string str;
+//     strings::SubstituteAndAppend(&str,
+//                                  "My name is $0 $1 and I am $2 years old.",
+//                                  first_name, last_name, age);
+//
+//   Substitute() is significantly faster than StringPrintf().  For very
+//   large strings, it may be orders of magnitude faster.
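+//
+//   Arguments may be reordered and reused, and a literal '$' is written
+//   as "$$".  For example (illustrative only):
+//
+//     strings::Substitute("$1 $0 $0", "a", "b");   // yields "b a a"
+//     strings::Substitute("Cost: $$$0", 5);        // yields "Cost: $5"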
+// ----------------------------------------------------------------------
+
+namespace internal {  // Implementation details.
+
+class SubstituteArg {
+ public:
+  inline SubstituteArg(const char* value)
+    : text_(value), size_(strlen(text_)) {}
+  inline SubstituteArg(const string& value)
+    : text_(value.data()), size_(value.size()) {}
+
+  // Indicates that no argument was given.
+  inline explicit SubstituteArg()
+    : text_(NULL), size_(-1) {}
+
+  // Primitives
+  // We don't overload for signed and unsigned char because if people are
+  // explicitly declaring their chars as signed or unsigned then they are
+  // probably actually using them as 8-bit integers and would probably
+  // prefer an integer representation.  But, we don't really know.  So, we
+  // make the caller decide what to do.
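+  // (For example, a caller with a signed char c can pass static_cast<int>(c)
+  // for the numeric form or static_cast<char>(c) for the character form.)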
+  inline SubstituteArg(char value)
+    : text_(scratch_), size_(1) { scratch_[0] = value; }
+  inline SubstituteArg(short value)
+    : text_(FastInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(unsigned short value)
+    : text_(FastUInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(int value)
+    : text_(FastInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(unsigned int value)
+    : text_(FastUInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(long value)
+    : text_(FastLongToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(unsigned long value)
+    : text_(FastULongToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(long long value)
+    : text_(FastInt64ToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(unsigned long long value)
+    : text_(FastUInt64ToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(float value)
+    : text_(FloatToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(double value)
+    : text_(DoubleToBuffer(value, scratch_)), size_(strlen(text_)) {}
+  inline SubstituteArg(bool value)
+    : text_(value ? "true" : "false"), size_(strlen(text_)) {}
+
+  inline const char* data() const { return text_; }
+  inline int size() const { return size_; }
+
+ private:
+  const char* text_;
+  int size_;
+  char scratch_[kFastToBufferSize];
+};
+
+}  // namespace internal
+
+LIBPROTOBUF_EXPORT string Substitute(
+  const char* format,
+  const internal::SubstituteArg& arg0 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg1 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg2 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg3 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg4 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg5 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg6 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg7 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg8 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg9 = internal::SubstituteArg());
+
+LIBPROTOBUF_EXPORT void SubstituteAndAppend(
+  string* output, const char* format,
+  const internal::SubstituteArg& arg0 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg1 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg2 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg3 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg4 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg5 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg6 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg7 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg8 = internal::SubstituteArg(),
+  const internal::SubstituteArg& arg9 = internal::SubstituteArg());
+
+}  // namespace strings
+}  // namespace protobuf
+}  // namespace google
+
+#endif // GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
diff --git a/src/google/protobuf/stubs/template_util.h b/src/google/protobuf/stubs/template_util.h
new file mode 100644
index 0000000..feef904
--- /dev/null
+++ b/src/google/protobuf/stubs/template_util.h
@@ -0,0 +1,138 @@
+// Copyright 2005 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ----
+// Author: lar@google.com (Laramie Leavitt)
+//
+// Template metaprogramming utility functions.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems.  Before making
+// any changes here, make sure that you're not breaking any platforms.
+//
+//
+// The names chosen here reflect those used in tr1 and the boost::mpl
+// library; there are similar operations in the Loki library as well.
+// I prefer the boost names for 2 reasons:
+// 1.  I think that portions of the Boost libraries are more likely to
+// be included in the C++ standard.
+// 2.  It is not impossible that some of the boost libraries will be
+// included in our own build in the future.
+// Both of these outcomes mean that we may be able to directly replace
+// some of these with boost equivalents.
+//
+#ifndef GOOGLE_PROTOBUF_TEMPLATE_UTIL_H_
+#define GOOGLE_PROTOBUF_TEMPLATE_UTIL_H_
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Types small_ and big_ are guaranteed such that sizeof(small_) <
+// sizeof(big_)
+typedef char small_;
+
+struct big_ {
+  char dummy[2];
+};
+
+// Identity metafunction.
+template <class T>
+struct identity_ {
+  typedef T type;
+};
+
+// integral_constant, defined in tr1, is a wrapper for an integer
+// value. We don't really need this generality; we could get away
+// with hardcoding the integer type to bool. We use the fully
+// general integral_constant for compatibility with tr1.
+
+template<class T, T v>
+struct integral_constant {
+  static const T value = v;
+  typedef T value_type;
+  typedef integral_constant<T, v> type;
+};
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+
+// Abbreviations: true_type and false_type are structs that represent boolean
+// true and false values. Also define the boost::mpl versions of those names,
+// true_ and false_.
+typedef integral_constant<bool, true>  true_type;
+typedef integral_constant<bool, false> false_type;
+typedef true_type  true_;
+typedef false_type false_;
+
+// if_ is a templatized conditional statement.
+// if_<cond, A, B> is a compile time evaluation of cond.
+// if_<>::type contains A if cond is true, B otherwise.
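+// For example, if_<true, int, double>::type is int, and
+// if_<false, int, double>::type is double.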
+template<bool cond, typename A, typename B>
+struct if_{
+  typedef A type;
+};
+
+template<typename A, typename B>
+struct if_<false, A, B> {
+  typedef B type;
+};
+
+
+// type_equals_ is a template type comparator, similar to Loki IsSameType.
+// type_equals_<A, B>::value is true iff "A" is the same type as "B".
+//
+// New code should prefer base::is_same, defined in base/type_traits.h.
+// It is functionally identical, but is_same is the standard spelling.
+template<typename A, typename B>
+struct type_equals_ : public false_ {
+};
+
+template<typename A>
+struct type_equals_<A, A> : public true_ {
+};
+
+// and_ is a template && operator.
+// and_<A, B>::value evaluates "A::value && B::value".
+template<typename A, typename B>
+struct and_ : public integral_constant<bool, (A::value && B::value)> {
+};
+
+// or_ is a template || operator.
+// or_<A, B>::value evaluates "A::value || B::value".
+template<typename A, typename B>
+struct or_ : public integral_constant<bool, (A::value || B::value)> {
+};
+
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_TEMPLATE_UTIL_H_
diff --git a/src/google/protobuf/stubs/template_util_unittest.cc b/src/google/protobuf/stubs/template_util_unittest.cc
new file mode 100644
index 0000000..b1745e2
--- /dev/null
+++ b/src/google/protobuf/stubs/template_util_unittest.cc
@@ -0,0 +1,130 @@
+// Copyright 2005 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ----
+// Author: lar@google.com (Laramie Leavitt)
+//
+// These tests are really compile time tests.
+// If you try to step through this in a debugger
+// you will not see any evaluations, merely that
+// value is assigned true or false sequentially.
+
+#include <google/protobuf/stubs/template_util.h>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace GOOGLE_NAMESPACE = google::protobuf::internal;
+
+namespace google {
+namespace protobuf {
+namespace internal {
+namespace {
+
+TEST(TemplateUtilTest, TestSize) {
+  EXPECT_GT(sizeof(GOOGLE_NAMESPACE::big_), sizeof(GOOGLE_NAMESPACE::small_));
+}
+
+TEST(TemplateUtilTest, TestIntegralConstants) {
+  // test the built-in types.
+  EXPECT_TRUE(true_type::value);
+  EXPECT_FALSE(false_type::value);
+
+  typedef integral_constant<int, 1> one_type;
+  EXPECT_EQ(1, one_type::value);
+}
+
+TEST(TemplateUtilTest, TestTemplateIf) {
+  typedef if_<true, true_type, false_type>::type if_true;
+  EXPECT_TRUE(if_true::value);
+
+  typedef if_<false, true_type, false_type>::type if_false;
+  EXPECT_FALSE(if_false::value);
+}
+
+TEST(TemplateUtilTest, TestTemplateTypeEquals) {
+  // Check that the TemplateTypeEquals works correctly.
+  bool value = false;
+
+  // Test the same type is true.
+  value = type_equals_<int, int>::value;
+  EXPECT_TRUE(value);
+
+  // Test different types are false.
+  value = type_equals_<float, int>::value;
+  EXPECT_FALSE(value);
+
+  // Test type aliasing.
+  typedef const int foo;
+  value = type_equals_<const foo, const int>::value;
+  EXPECT_TRUE(value);
+}
+
+TEST(TemplateUtilTest, TestTemplateAndOr) {
+  // Check that the and_ and or_ metafunctions work correctly.
+  bool value = false;
+
+  // Yes && Yes == true.
+  value = and_<true_, true_>::value;
+  EXPECT_TRUE(value);
+  // Yes && No == false.
+  value = and_<true_, false_>::value;
+  EXPECT_FALSE(value);
+  // No && Yes == false.
+  value = and_<false_, true_>::value;
+  EXPECT_FALSE(value);
+  // No && No == false.
+  value = and_<false_, false_>::value;
+  EXPECT_FALSE(value);
+
+  // Yes || Yes == true.
+  value = or_<true_, true_>::value;
+  EXPECT_TRUE(value);
+  // Yes || No == true.
+  value = or_<true_, false_>::value;
+  EXPECT_TRUE(value);
+  // No || Yes == true.
+  value = or_<false_, true_>::value;
+  EXPECT_TRUE(value);
+  // No || No == false.
+  value = or_<false_, false_>::value;
+  EXPECT_FALSE(value);
+}
+
+TEST(TemplateUtilTest, TestIdentity) {
+  EXPECT_TRUE(
+      (type_equals_<GOOGLE_NAMESPACE::identity_<int>::type, int>::value));
+  EXPECT_TRUE(
+      (type_equals_<GOOGLE_NAMESPACE::identity_<void>::type, void>::value));
+}
+
+}  // anonymous namespace
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/time.cc b/src/google/protobuf/stubs/time.cc
new file mode 100644
index 0000000..49c0412
--- /dev/null
+++ b/src/google/protobuf/stubs/time.cc
@@ -0,0 +1,365 @@
+#include <google/protobuf/stubs/time.h>
+
+#include <ctime>
+
+#include <google/protobuf/stubs/stringprintf.h>
+#include <google/protobuf/stubs/strutil.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+namespace {
+static const int64 kSecondsPerMinute = 60;
+static const int64 kSecondsPerHour = 3600;
+static const int64 kSecondsPerDay = kSecondsPerHour * 24;
+static const int64 kSecondsPer400Years =
+    kSecondsPerDay * (400 * 365 + 400 / 4 - 3);
+// Seconds from 0001-01-01T00:00:00 to 1970-01-01T00:00:00
+static const int64 kSecondsFromEraToEpoch = 62135596800LL;
+// The range of timestamp values we support.
+static const int64 kMinTime = -62135596800LL;  // 0001-01-01T00:00:00
+static const int64 kMaxTime = 253402300799LL;  // 9999-12-31T23:59:59
+
+static const int kNanosPerMillisecond = 1000000;
+static const int kNanosPerMicrosecond = 1000;
+
+// Count the seconds from the given year (start at Jan 1, 00:00) to 100 years
+// after.
+int64 SecondsPer100Years(int year) {
+  if (year % 400 == 0 || year % 400 > 300) {
+    return kSecondsPerDay * (100 * 365 + 100 / 4);
+  } else {
+    return kSecondsPerDay * (100 * 365 + 100 / 4 - 1);
+  }
+}
+
+// Count the seconds from the given year (start at Jan 1, 00:00) to 4 years
+// after.
+int64 SecondsPer4Years(int year) {
+  if ((year % 100 == 0 || year % 100 > 96) &&
+      !(year % 400 == 0 || year % 400 > 396)) {
+    // No leap years.
+    return kSecondsPerDay * (4 * 365);
+  } else {
+    // One leap year.
+    return kSecondsPerDay * (4 * 365 + 1);
+  }
+}
+
+bool IsLeapYear(int year) {
+  return year % 400 == 0 || (year % 4 == 0 && year % 100 != 0);
+}
+
+int64 SecondsPerYear(int year) {
+  return kSecondsPerDay * (IsLeapYear(year) ? 366 : 365);
+}
+
+static const int kDaysInMonth[13] = {
+  0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+};
+
+int64 SecondsPerMonth(int month, bool leap) {
+  if (month == 2 && leap) {
+    return kSecondsPerDay * (kDaysInMonth[month] + 1);
+  }
+  return kSecondsPerDay * kDaysInMonth[month];
+}
+
+static const int kDaysSinceJan[13] = {
+  0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
+};
+
+bool ValidateDateTime(const DateTime& time) {
+  if (time.year < 1 || time.year > 9999 ||
+      time.month < 1 || time.month > 12 ||
+      time.day < 1 || time.day > 31 ||
+      time.hour < 0 || time.hour > 23 ||
+      time.minute < 0 || time.minute > 59 ||
+      time.second < 0 || time.second > 59) {
+    return false;
+  }
+  if (time.month == 2 && IsLeapYear(time.year)) {
+    return time.day <= kDaysInMonth[time.month] + 1;
+  } else {
+    return time.day <= kDaysInMonth[time.month];
+  }
+}
+
+// Count the number of seconds elapsed from 0001-01-01T00:00:00 to the given
+// time.
+int64 SecondsSinceCommonEra(const DateTime& time) {
+  int64 result = 0;
+  // Years should be between 1 and 9999.
+  assert(time.year >= 1 && time.year <= 9999);
+  int year = 1;
+  if ((time.year - year) >= 400) {
+    int count_400years = (time.year - year) / 400;
+    result += kSecondsPer400Years * count_400years;
+    year += count_400years * 400;
+  }
+  while ((time.year - year) >= 100) {
+    result += SecondsPer100Years(year);
+    year += 100;
+  }
+  while ((time.year - year) >= 4) {
+    result += SecondsPer4Years(year);
+    year += 4;
+  }
+  while (time.year > year) {
+    result += SecondsPerYear(year);
+    ++year;
+  }
+  // Months should be between 1 and 12.
+  assert(time.month >= 1 && time.month <= 12);
+  int month = time.month;
+  result += kSecondsPerDay * kDaysSinceJan[month];
+  if (month > 2 && IsLeapYear(year)) {
+    result += kSecondsPerDay;
+  }
+  assert(time.day >= 1 &&
+         time.day <= (month == 2 && IsLeapYear(year)
+                          ? kDaysInMonth[month] + 1
+                          : kDaysInMonth[month]));
+  result += kSecondsPerDay * (time.day - 1);
+  result += kSecondsPerHour * time.hour +
+      kSecondsPerMinute * time.minute +
+      time.second;
+  return result;
+}
+
+// Format nanoseconds with either 3, 6, or 9 digits depending on the required
+// precision to represent the exact value.
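+// For example, 120000000 is formatted as "120", 120000 as "000120", and
+// 1 as "000000001".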
+string FormatNanos(int32 nanos) {
+  if (nanos % kNanosPerMillisecond == 0) {
+    return StringPrintf("%03d", nanos / kNanosPerMillisecond);
+  } else if (nanos % kNanosPerMicrosecond == 0) {
+    return StringPrintf("%06d", nanos / kNanosPerMicrosecond);
+  } else {
+    return StringPrintf("%09d", nanos);
+  }
+}
+
+// Parses an integer from a null-terminated char sequence. The method
+// consumes at most "width" chars. Returns a pointer after the consumed
+// integer, or NULL if the data does not start with an integer or the
+// integer value does not fall in the range of [min_value, max_value].
+const char* ParseInt(const char* data, int width, int min_value,
+                     int max_value, int* result) {
+  if (!ascii_isdigit(*data)) {
+    return NULL;
+  }
+  int value = 0;
+  for (int i = 0; i < width; ++i, ++data) {
+    if (ascii_isdigit(*data)) {
+      value = value * 10 + (*data - '0');
+    } else {
+      break;
+    }
+  }
+  if (value >= min_value && value <= max_value) {
+    *result = value;
+    return data;
+  } else {
+    return NULL;
+  }
+}
+
+// Consumes the fractional parts of a second into nanos. For example,
+// "010" will be parsed to 10000000 nanos.
+const char* ParseNanos(const char* data, int32* nanos) {
+  if (!ascii_isdigit(*data)) {
+    return NULL;
+  }
+  int value = 0;
+  int len = 0;
+  // Consume as many digits as there are but only take the first 9 into
+  // account.
+  while (ascii_isdigit(*data)) {
+    if (len < 9) {
+      value = value * 10 + *data - '0';
+    }
+    ++len;
+    ++data;
+  }
+  while (len < 9) {
+    value = value * 10;
+    ++len;
+  }
+  *nanos = value;
+  return data;
+}
+
+const char* ParseTimezoneOffset(const char* data, int64* offset) {
+  // Accept format "HH:MM". E.g., "08:00"
+  int hour;
+  if ((data = ParseInt(data, 2, 0, 23, &hour)) == NULL) {
+    return NULL;
+  }
+  if (*data++ != ':') {
+    return NULL;
+  }
+  int minute;
+  if ((data = ParseInt(data, 2, 0, 59, &minute)) == NULL) {
+    return NULL;
+  }
+  *offset = (hour * 60 + minute) * 60;
+  return data;
+}
+}  // namespace
+
+bool SecondsToDateTime(int64 seconds, DateTime* time) {
+  if (seconds < kMinTime || seconds > kMaxTime) {
+    return false;
+  }
+  // It's easier to calculate the DateTime starting from 0001-01-01T00:00:00
+  seconds = seconds + kSecondsFromEraToEpoch;
+  int year = 1;
+  if (seconds >= kSecondsPer400Years) {
+    int count_400years = seconds / kSecondsPer400Years;
+    year += 400 * count_400years;
+    seconds %= kSecondsPer400Years;
+  }
+  while (seconds >= SecondsPer100Years(year)) {
+    seconds -= SecondsPer100Years(year);
+    year += 100;
+  }
+  while (seconds >= SecondsPer4Years(year)) {
+    seconds -= SecondsPer4Years(year);
+    year += 4;
+  }
+  while (seconds >= SecondsPerYear(year)) {
+    seconds -= SecondsPerYear(year);
+    year += 1;
+  }
+  bool leap = IsLeapYear(year);
+  int month = 1;
+  while (seconds >= SecondsPerMonth(month, leap)) {
+    seconds -= SecondsPerMonth(month, leap);
+    ++month;
+  }
+  int day = 1 + seconds / kSecondsPerDay;
+  seconds %= kSecondsPerDay;
+  int hour = seconds / kSecondsPerHour;
+  seconds %= kSecondsPerHour;
+  int minute = seconds / kSecondsPerMinute;
+  seconds %= kSecondsPerMinute;
+  time->year = year;
+  time->month = month;
+  time->day = day;
+  time->hour = hour;
+  time->minute = minute;
+  time->second = static_cast<int>(seconds);
+  return true;
+}
+
+bool DateTimeToSeconds(const DateTime& time, int64* seconds) {
+  if (!ValidateDateTime(time)) {
+    return false;
+  }
+  *seconds = SecondsSinceCommonEra(time) - kSecondsFromEraToEpoch;
+  return true;
+}
+
+void GetCurrentTime(int64* seconds, int32* nanos) {
+  // TODO(xiaofeng): Improve the accuracy of this implementation (or just
+  // remove this method from protobuf).
+  *seconds = time(NULL);
+  *nanos = 0;
+}
+
+string FormatTime(int64 seconds, int32 nanos) {
+  DateTime time;
+  if (nanos < 0 || nanos > 999999999 || !SecondsToDateTime(seconds, &time)) {
+    return "InvalidTime";
+  }
+  string result = StringPrintf("%04d-%02d-%02dT%02d:%02d:%02d",
+                               time.year, time.month, time.day,
+                               time.hour, time.minute, time.second);
+  if (nanos != 0) {
+    result += "." + FormatNanos(nanos);
+  }
+  return result + "Z";
+}
+
+bool ParseTime(const string& value, int64* seconds, int32* nanos) {
+  DateTime time;
+  const char* data = value.c_str();
+  // We only accept:
+  //   Z-normalized: 2015-05-20T13:29:35.120Z
+  //   With UTC offset: 2015-05-20T13:29:35.120-08:00
+
+  // Parse year
+  if ((data = ParseInt(data, 4, 1, 9999, &time.year)) == NULL) {
+    return false;
+  }
+  // Expect '-'
+  if (*data++ != '-') return false;
+  // Parse month
+  if ((data = ParseInt(data, 2, 1, 12, &time.month)) == NULL) {
+    return false;
+  }
+  // Expect '-'
+  if (*data++ != '-') return false;
+  // Parse day
+  if ((data = ParseInt(data, 2, 1, 31, &time.day)) == NULL) {
+    return false;
+  }
+  // Expect 'T'
+  if (*data++ != 'T') return false;
+  // Parse hour
+  if ((data = ParseInt(data, 2, 0, 23, &time.hour)) == NULL) {
+    return false;
+  }
+  // Expect ':'
+  if (*data++ != ':') return false;
+  // Parse minute
+  if ((data = ParseInt(data, 2, 0, 59, &time.minute)) == NULL) {
+    return false;
+  }
+  // Expect ':'
+  if (*data++ != ':') return false;
+  // Parse second
+  if ((data = ParseInt(data, 2, 0, 59, &time.second)) == NULL) {
+    return false;
+  }
+  if (!DateTimeToSeconds(time, seconds)) {
+    return false;
+  }
+  // Parse nanoseconds.
+  if (*data == '.') {
+    ++data;
+    // Parse nanoseconds.
+    if ((data = ParseNanos(data, nanos)) == NULL) {
+      return false;
+    }
+  } else {
+    *nanos = 0;
+  }
+  // Parse UTC offsets.
+  if (*data == 'Z') {
+    ++data;
+  } else if (*data == '+') {
+    ++data;
+    int64 offset;
+    if ((data = ParseTimezoneOffset(data, &offset)) == NULL) {
+      return false;
+    }
+    *seconds -= offset;
+  } else if (*data == '-') {
+    ++data;
+    int64 offset;
+    if ((data = ParseTimezoneOffset(data, &offset)) == NULL) {
+      return false;
+    }
+    *seconds += offset;
+  } else {
+    return false;
+  }
+  // Done with parsing.
+  return *data == 0;
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/time.h b/src/google/protobuf/stubs/time.h
new file mode 100644
index 0000000..20a6b56
--- /dev/null
+++ b/src/google/protobuf/stubs/time.h
@@ -0,0 +1,75 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef GOOGLE_PROTOBUF_STUBS_TIME_H_
+#define GOOGLE_PROTOBUF_STUBS_TIME_H_
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+struct DateTime {
+  int year;
+  int month;
+  int day;
+  int hour;
+  int minute;
+  int second;
+};
+
+// Converts a timestamp (seconds elapsed since 1970-01-01T00:00:00, could be
+// negative to represent time before 1970-01-01) to DateTime. Returns false
+// if the timestamp is not in the range between 0001-01-01T00:00:00 and
+// 9999-12-31T23:59:59.
+bool LIBPROTOBUF_EXPORT SecondsToDateTime(int64 seconds, DateTime* time);
+// Converts DateTime to a timestamp (seconds since 1970-01-01T00:00:00).
+// Returns false if the DateTime is not valid or is not in the valid range.
+bool LIBPROTOBUF_EXPORT DateTimeToSeconds(const DateTime& time, int64* seconds);
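+//
+// Illustrative roundtrip sketch:
+//
+//   DateTime time;
+//   if (SecondsToDateTime(0, &time)) {
+//     // time now holds 1970-01-01T00:00:00.
+//     int64 seconds = 0;
+//     DateTimeToSeconds(time, &seconds);  // seconds == 0 again.
+//   }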
+
+void LIBPROTOBUF_EXPORT GetCurrentTime(int64* seconds, int32* nanos);
+
+// Formats a time string in RFC3339 format.
+//
+// For example, "2015-05-20T13:29:35.120Z". For nanos, 0, 3, 6 or 9 fractional
+// digits will be used depending on how many are required to represent the exact
+// value.
+//
+// Note that "nanos" must be in the range [0, 999999999].
+string LIBPROTOBUF_EXPORT FormatTime(int64 seconds, int32 nanos);
+// Parses a time string. This method accepts RFC3339 date/time strings, either
+// Z-normalized or with a UTC offset. For example,
+// "2015-05-20T13:29:35.120-08:00".
+bool LIBPROTOBUF_EXPORT ParseTime(const string& value, int64* seconds, int32* nanos);
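+//
+// Illustrative parsing sketch (values follow the RFC3339 forms noted above):
+//
+//   int64 seconds = 0;
+//   int32 nanos = 0;
+//   if (ParseTime("2015-05-20T13:29:35.120-08:00", &seconds, &nanos)) {
+//     // Re-formatting always yields the Z-normalized (UTC) form.
+//     string normalized = FormatTime(seconds, nanos);
+//     // normalized == "2015-05-20T21:29:35.120Z"
+//   }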
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_TIME_H_
diff --git a/src/google/protobuf/stubs/time_test.cc b/src/google/protobuf/stubs/time_test.cc
new file mode 100644
index 0000000..59e9d1c
--- /dev/null
+++ b/src/google/protobuf/stubs/time_test.cc
@@ -0,0 +1,208 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <google/protobuf/stubs/time.h>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+namespace {
+static const int64 kSecondsPerDay = 3600 * 24;
+
+// For DateTime, tests will mostly focus on the date part because that's
+// the tricky one.
+int64 CreateTimestamp(int year, int month, int day) {
+  DateTime time;
+  time.year = year;
+  time.month = month;
+  time.day = day;
+  time.hour = time.minute = time.second = 0;
+  int64 result;
+  GOOGLE_CHECK(DateTimeToSeconds(time, &result));
+  // Check that a roundtrip produces the same result.
+  GOOGLE_CHECK(SecondsToDateTime(result, &time));
+  GOOGLE_CHECK(time.year == year);
+  GOOGLE_CHECK(time.month == month);
+  GOOGLE_CHECK(time.day == day);
+  return result;
+}
+
+TEST(DateTimeTest, SimpleTime) {
+  DateTime time;
+  ASSERT_TRUE(SecondsToDateTime(1, &time));
+  EXPECT_EQ(1970, time.year);
+  EXPECT_EQ(1, time.month);
+  EXPECT_EQ(1, time.day);
+  EXPECT_EQ(0, time.hour);
+  EXPECT_EQ(0, time.minute);
+  EXPECT_EQ(1, time.second);
+  int64 seconds;
+  ASSERT_TRUE(DateTimeToSeconds(time, &seconds));
+  EXPECT_EQ(1, seconds);
+
+  ASSERT_TRUE(SecondsToDateTime(-1, &time));
+  EXPECT_EQ(1969, time.year);
+  EXPECT_EQ(12, time.month);
+  EXPECT_EQ(31, time.day);
+  EXPECT_EQ(23, time.hour);
+  EXPECT_EQ(59, time.minute);
+  EXPECT_EQ(59, time.second);
+  ASSERT_TRUE(DateTimeToSeconds(time, &seconds));
+  EXPECT_EQ(-1, seconds);
+
+  DateTime start, end;
+  start.year = 1;
+  start.month = 1;
+  start.day = 1;
+  start.hour = 0;
+  start.minute = 0;
+  start.second = 0;
+  end.year = 9999;
+  end.month = 12;
+  end.day = 31;
+  end.hour = 23;
+  end.minute = 59;
+  end.second = 59;
+  int64 start_time, end_time;
+  ASSERT_TRUE(DateTimeToSeconds(start, &start_time));
+  ASSERT_TRUE(DateTimeToSeconds(end, &end_time));
+  EXPECT_EQ(315537897599LL, end_time - start_time);
+  ASSERT_TRUE(SecondsToDateTime(start_time, &time));
+  ASSERT_TRUE(DateTimeToSeconds(time, &seconds));
+  EXPECT_EQ(start_time, seconds);
+  ASSERT_TRUE(SecondsToDateTime(end_time, &time));
+  ASSERT_TRUE(DateTimeToSeconds(time, &seconds));
+  EXPECT_EQ(end_time, seconds);
+}
+
+TEST(DateTimeTest, DayInMonths) {
+  // Check that month boundaries are handled correctly.
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 1, 1) - CreateTimestamp(2014, 12, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 2, 1) - CreateTimestamp(2015, 1, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 3, 1) - CreateTimestamp(2015, 2, 28));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 4, 1) - CreateTimestamp(2015, 3, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 5, 1) - CreateTimestamp(2015, 4, 30));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 6, 1) - CreateTimestamp(2015, 5, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 7, 1) - CreateTimestamp(2015, 6, 30));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 8, 1) - CreateTimestamp(2015, 7, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 9, 1) - CreateTimestamp(2015, 8, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 10, 1) - CreateTimestamp(2015, 9, 30));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 11, 1) - CreateTimestamp(2015, 10, 31));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 12, 1) - CreateTimestamp(2015, 11, 30));
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2016, 1, 1) - CreateTimestamp(2015, 12, 31));
+}
+
+TEST(DateTimeTest, LeapYear) {
+  // Non-leap year.
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2015, 3, 1) - CreateTimestamp(2015, 2, 28));
+  // Leap year.
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2016, 3, 1) - CreateTimestamp(2016, 2, 29));
+  // Non-leap year.
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2100, 3, 1) - CreateTimestamp(2100, 2, 28));
+  // Leap year.
+  EXPECT_EQ(kSecondsPerDay,
+            CreateTimestamp(2400, 3, 1) - CreateTimestamp(2400, 2, 29));
+}
+
+TEST(DateTimeTest, StringFormat) {
+  DateTime start, end;
+  start.year = 1;
+  start.month = 1;
+  start.day = 1;
+  start.hour = 0;
+  start.minute = 0;
+  start.second = 0;
+  end.year = 9999;
+  end.month = 12;
+  end.day = 31;
+  end.hour = 23;
+  end.minute = 59;
+  end.second = 59;
+  int64 start_time, end_time;
+  ASSERT_TRUE(DateTimeToSeconds(start, &start_time));
+  ASSERT_TRUE(DateTimeToSeconds(end, &end_time));
+
+  EXPECT_EQ("0001-01-01T00:00:00Z", FormatTime(start_time, 0));
+  EXPECT_EQ("9999-12-31T23:59:59Z", FormatTime(end_time, 0));
+
+  // Make sure the nanoseconds part is formatted correctly.
+  EXPECT_EQ("1970-01-01T00:00:00.010Z", FormatTime(0, 10000000));
+  EXPECT_EQ("1970-01-01T00:00:00.000010Z", FormatTime(0, 10000));
+  EXPECT_EQ("1970-01-01T00:00:00.000000010Z", FormatTime(0, 10));
+}
+
+TEST(DateTimeTest, ParseString) {
+  int64 seconds;
+  int32 nanos;
+  ASSERT_TRUE(ParseTime("0001-01-01T00:00:00Z", &seconds, &nanos));
+  EXPECT_EQ("0001-01-01T00:00:00Z", FormatTime(seconds, nanos));
+  ASSERT_TRUE(ParseTime("9999-12-31T23:59:59.999999999Z", &seconds, &nanos));
+  EXPECT_EQ("9999-12-31T23:59:59.999999999Z", FormatTime(seconds, nanos));
+
+  // Test time zone offsets.
+  ASSERT_TRUE(ParseTime("1970-01-01T00:00:00-08:00", &seconds, &nanos));
+  EXPECT_EQ("1970-01-01T08:00:00Z", FormatTime(seconds, nanos));
+  ASSERT_TRUE(ParseTime("1970-01-01T00:00:00+08:00", &seconds, &nanos));
+  EXPECT_EQ("1969-12-31T16:00:00Z", FormatTime(seconds, nanos));
+
+  // Test nanoseconds.
+  ASSERT_TRUE(ParseTime("1970-01-01T00:00:00.01Z", &seconds, &nanos));
+  EXPECT_EQ("1970-01-01T00:00:00.010Z", FormatTime(seconds, nanos));
+  ASSERT_TRUE(ParseTime("1970-01-01T00:00:00.00001-08:00", &seconds, &nanos));
+  EXPECT_EQ("1970-01-01T08:00:00.000010Z", FormatTime(seconds, nanos));
+  ASSERT_TRUE(ParseTime("1970-01-01T00:00:00.00000001+08:00", &seconds, &nanos));
+  EXPECT_EQ("1969-12-31T16:00:00.000000010Z", FormatTime(seconds, nanos));
+  // Fractional parts less than 1 nanosecond will be ignored.
+  ASSERT_TRUE(ParseTime("1970-01-01T00:00:00.0123456789Z", &seconds, &nanos));
+  EXPECT_EQ("1970-01-01T00:00:00.012345678Z", FormatTime(seconds, nanos));
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/src/google/protobuf/stubs/type_traits.h b/src/google/protobuf/stubs/type_traits.h
new file mode 100644
index 0000000..0d8127e
--- /dev/null
+++ b/src/google/protobuf/stubs/type_traits.h
@@ -0,0 +1,364 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ----
+// Author: Matt Austern
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems.  Before making
+// any changes here, make sure that you're not breaking any platforms.
+//
+// Define a small subset of tr1 type traits. The traits we define are:
+//   enable_if
+//   is_integral
+//   is_floating_point
+//   is_pointer
+//   is_enum
+//   is_reference
+//   is_pod
+//   has_trivial_constructor
+//   has_trivial_copy
+//   has_trivial_assign
+//   has_trivial_destructor
+//   remove_const
+//   remove_volatile
+//   remove_cv
+//   remove_reference
+//   add_reference
+//   remove_pointer
+//   is_same
+//   is_convertible
+// We can add more type traits as required.
+
+#ifndef GOOGLE_PROTOBUF_TYPE_TRAITS_H_
+#define GOOGLE_PROTOBUF_TYPE_TRAITS_H_
+
+#include <cstddef>                  // for NULL
+#include <utility>                  // For pair
+
+#include <google/protobuf/stubs/template_util.h>  // For true_type and false_type
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+template<typename B, typename D>
+struct is_base_of {
+  typedef char (&yes)[1];
+  typedef char (&no)[2];
+
+  // BEGIN GOOGLE LOCAL MODIFICATION -- check is a #define on Mac.
+  #undef check
+  // END GOOGLE LOCAL MODIFICATION
+
+  static yes check(const B*);
+  static no check(const void*);
+
+  enum {
+    value = sizeof(check(static_cast<const D*>(NULL))) == sizeof(yes),
+  };
+};
+
+template <bool cond, class T = void> struct enable_if;
+template <class T> struct is_integral;
+template <class T> struct is_floating_point;
+template <class T> struct is_pointer;
+// MSVC can't compile this correctly, and neither can gcc 3.3.5 (at least)
+#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
+// is_enum uses is_convertible, which is not available on MSVC.
+template <class T> struct is_enum;
+#endif
+template <class T> struct is_reference;
+template <class T> struct is_pod;
+template <class T> struct has_trivial_constructor;
+template <class T> struct has_trivial_copy;
+template <class T> struct has_trivial_assign;
+template <class T> struct has_trivial_destructor;
+template <class T> struct remove_const;
+template <class T> struct remove_volatile;
+template <class T> struct remove_cv;
+template <class T> struct remove_reference;
+template <class T> struct add_reference;
+template <class T> struct remove_pointer;
+template <class T, class U> struct is_same;
+#if !(defined(__GNUC__) && __GNUC__ <= 3)
+template <class From, class To> struct is_convertible;
+#endif
+
+// enable_if, equivalent semantics to c++11 std::enable_if, specifically:
+//   "If B is true, the member typedef type shall equal T; otherwise, there
+//    shall be no member typedef type."
+// Specified by 20.9.7.6 [Other transformations]
+
+template<bool cond, class T> struct enable_if { typedef T type; };
+template<class T> struct enable_if<false, T> {};
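+//
+// Illustrative sketch ("Increment" is a hypothetical function used only to
+// show the SFINAE pattern; it is not part of this header):
+//
+//   template <typename T>
+//   typename enable_if<is_integral<T>::value, T>::type
+//   Increment(T value) { return value + 1; }  // only enabled for integral T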
+// is_integral is false except for the built-in integer types. A
+// cv-qualified type is integral if and only if the underlying type is.
+template <class T> struct is_integral : false_type { };
+template<> struct is_integral<bool> : true_type { };
+template<> struct is_integral<char> : true_type { };
+template<> struct is_integral<unsigned char> : true_type { };
+template<> struct is_integral<signed char> : true_type { };
+#if defined(_MSC_VER)
+// wchar_t is not by default a distinct type from unsigned short in
+// Microsoft C.
+// See http://msdn2.microsoft.com/en-us/library/dh8che7s(VS.80).aspx
+template<> struct is_integral<__wchar_t> : true_type { };
+#else
+template<> struct is_integral<wchar_t> : true_type { };
+#endif
+template<> struct is_integral<short> : true_type { };
+template<> struct is_integral<unsigned short> : true_type { };
+template<> struct is_integral<int> : true_type { };
+template<> struct is_integral<unsigned int> : true_type { };
+template<> struct is_integral<long> : true_type { };
+template<> struct is_integral<unsigned long> : true_type { };
+#ifdef HAVE_LONG_LONG
+template<> struct is_integral<long long> : true_type { };
+template<> struct is_integral<unsigned long long> : true_type { };
+#endif
+template <class T> struct is_integral<const T> : is_integral<T> { };
+template <class T> struct is_integral<volatile T> : is_integral<T> { };
+template <class T> struct is_integral<const volatile T> : is_integral<T> { };
+
+// is_floating_point is false except for the built-in floating-point types.
+// A cv-qualified type is floating-point if and only if the underlying type is.
+template <class T> struct is_floating_point : false_type { };
+template<> struct is_floating_point<float> : true_type { };
+template<> struct is_floating_point<double> : true_type { };
+template<> struct is_floating_point<long double> : true_type { };
+template <class T> struct is_floating_point<const T>
+    : is_floating_point<T> { };
+template <class T> struct is_floating_point<volatile T>
+    : is_floating_point<T> { };
+template <class T> struct is_floating_point<const volatile T>
+    : is_floating_point<T> { };
+
+// is_pointer is false except for pointer types. A cv-qualified type (e.g.
+// "int* const", as opposed to "int const*") is a pointer if and only if
+// the underlying type is.
+template <class T> struct is_pointer : false_type { };
+template <class T> struct is_pointer<T*> : true_type { };
+template <class T> struct is_pointer<const T> : is_pointer<T> { };
+template <class T> struct is_pointer<volatile T> : is_pointer<T> { };
+template <class T> struct is_pointer<const volatile T> : is_pointer<T> { };
+
+#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
+
+namespace type_traits_internal {
+
+template <class T> struct is_class_or_union {
+  template <class U> static small_ tester(void (U::*)());
+  template <class U> static big_ tester(...);
+  static const bool value = sizeof(tester<T>(0)) == sizeof(small_);
+};
+
+// is_convertible chokes if the first argument is an array. That's why
+// we use add_reference here.
+template <bool NotUnum, class T> struct is_enum_impl
+    : is_convertible<typename add_reference<T>::type, int> { };
+
+template <class T> struct is_enum_impl<true, T> : false_type { };
+
+}  // namespace type_traits_internal
+
+// Specified by TR1 [4.5.1] primary type categories.
+
+// Implementation note:
+//
+// Each type is either void, integral, floating point, array, pointer,
+// reference, member object pointer, member function pointer, enum,
+// union or class. Out of these, only integral, floating point, reference,
+// class and enum types are potentially convertible to int. Therefore,
+// if a type is not a reference, integral, floating point or class and
+// is convertible to int, it's an enum. Adding cv-qualification to a type
+// does not change whether it's an enum.
+//
+// Is-convertible-to-int check is done only if all other checks pass,
+// because it can't be used with some types (e.g. void or classes with
+// inaccessible conversion operators).
+template <class T> struct is_enum
+    : type_traits_internal::is_enum_impl<
+          is_same<T, void>::value ||
+              is_integral<T>::value ||
+              is_floating_point<T>::value ||
+              is_reference<T>::value ||
+              type_traits_internal::is_class_or_union<T>::value,
+          T> { };
+
+template <class T> struct is_enum<const T> : is_enum<T> { };
+template <class T> struct is_enum<volatile T> : is_enum<T> { };
+template <class T> struct is_enum<const volatile T> : is_enum<T> { };
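+
+// Illustrative sketch (matching the note above; "Color" is a hypothetical
+// enum, not part of this header):
+//
+//   enum Color { RED, GREEN };
+//   // is_enum<Color>::value == true
+//   // is_enum<const Color>::value == true
+//   // is_enum<Color*>::value == false   (a pointer, not an enum)
+//   // is_enum<int>::value == false      (integral, so the check short-circuits)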
+
+#endif
+
+// is_reference is false except for reference types.
+template<typename T> struct is_reference : false_type {};
+template<typename T> struct is_reference<T&> : true_type {};
+
+
+// We can't get is_pod right without compiler help, so fail conservatively.
+// We will assume it's false except for arithmetic types, enumerations,
+// pointers and cv-qualified versions thereof. Note that std::pair<T,U>
+// is not a POD even if T and U are PODs.
+template <class T> struct is_pod
+ : integral_constant<bool, (is_integral<T>::value ||
+                            is_floating_point<T>::value ||
+#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
+                            // is_enum is not available on MSVC.
+                            is_enum<T>::value ||
+#endif
+                            is_pointer<T>::value)> { };
+template <class T> struct is_pod<const T> : is_pod<T> { };
+template <class T> struct is_pod<volatile T> : is_pod<T> { };
+template <class T> struct is_pod<const volatile T> : is_pod<T> { };
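+
+// Illustrative sketch (consistent with the conservative policy above and with
+// the values checked in type_traits_unittest.cc):
+//
+//   // is_pod<int>::value == true
+//   // is_pod<const double>::value == true
+//   // is_pod<int*>::value == true
+//   // is_pod<std::pair<int, int> >::value == false  (pairs never report as POD)
+//   // is_pod<string>::value == false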
+
+
+// We can't get has_trivial_constructor right without compiler help, so
+// fail conservatively. We will assume it's false except for: (1) types
+// for which is_pod is true. (2) std::pair of types with trivial
+// constructors. (3) array of a type with a trivial constructor.
+// (4) const versions thereof.
+template <class T> struct has_trivial_constructor : is_pod<T> { };
+template <class T, class U> struct has_trivial_constructor<std::pair<T, U> >
+  : integral_constant<bool,
+                      (has_trivial_constructor<T>::value &&
+                       has_trivial_constructor<U>::value)> { };
+template <class A, int N> struct has_trivial_constructor<A[N]>
+  : has_trivial_constructor<A> { };
+template <class T> struct has_trivial_constructor<const T>
+  : has_trivial_constructor<T> { };
+
+// We can't get has_trivial_copy right without compiler help, so fail
+// conservatively. We will assume it's false except for: (1) types
+// for which is_pod is true. (2) std::pair of types with trivial copy
+// constructors. (3) array of a type with a trivial copy constructor.
+// (4) const versions thereof.
+template <class T> struct has_trivial_copy : is_pod<T> { };
+template <class T, class U> struct has_trivial_copy<std::pair<T, U> >
+  : integral_constant<bool,
+                      (has_trivial_copy<T>::value &&
+                       has_trivial_copy<U>::value)> { };
+template <class A, int N> struct has_trivial_copy<A[N]>
+  : has_trivial_copy<A> { };
+template <class T> struct has_trivial_copy<const T> : has_trivial_copy<T> { };
+
+// We can't get has_trivial_assign right without compiler help, so fail
+// conservatively. We will assume it's false except for: (1) types
+// for which is_pod is true. (2) std::pair of types with trivial assignment
+// operators. (3) array of a type with a trivial assignment operator.
+template <class T> struct has_trivial_assign : is_pod<T> { };
+template <class T, class U> struct has_trivial_assign<std::pair<T, U> >
+  : integral_constant<bool,
+                      (has_trivial_assign<T>::value &&
+                       has_trivial_assign<U>::value)> { };
+template <class A, int N> struct has_trivial_assign<A[N]>
+  : has_trivial_assign<A> { };
+
+// We can't get has_trivial_destructor right without compiler help, so
+// fail conservatively. We will assume it's false except for: (1) types
+// for which is_pod is true. (2) std::pair of types with trivial
+// destructors. (3) array of a type with a trivial destructor.
+// (4) const versions thereof.
+template <class T> struct has_trivial_destructor : is_pod<T> { };
+template <class T, class U> struct has_trivial_destructor<std::pair<T, U> >
+  : integral_constant<bool,
+                      (has_trivial_destructor<T>::value &&
+                       has_trivial_destructor<U>::value)> { };
+template <class A, int N> struct has_trivial_destructor<A[N]>
+  : has_trivial_destructor<A> { };
+template <class T> struct has_trivial_destructor<const T>
+  : has_trivial_destructor<T> { };
+
+// Specified by TR1 [4.7.1]
+template<typename T> struct remove_const { typedef T type; };
+template<typename T> struct remove_const<T const> { typedef T type; };
+template<typename T> struct remove_volatile { typedef T type; };
+template<typename T> struct remove_volatile<T volatile> { typedef T type; };
+template<typename T> struct remove_cv {
+  typedef typename remove_const<typename remove_volatile<T>::type>::type type;
+};
+
+
+// Specified by TR1 [4.7.2] Reference modifications.
+template<typename T> struct remove_reference { typedef T type; };
+template<typename T> struct remove_reference<T&> { typedef T type; };
+
+template <typename T> struct add_reference { typedef T& type; };
+template <typename T> struct add_reference<T&> { typedef T& type; };
+
+// Specified by TR1 [4.7.4] Pointer modifications.
+template<typename T> struct remove_pointer { typedef T type; };
+template<typename T> struct remove_pointer<T*> { typedef T type; };
+template<typename T> struct remove_pointer<T* const> { typedef T type; };
+template<typename T> struct remove_pointer<T* volatile> { typedef T type; };
+template<typename T> struct remove_pointer<T* const volatile> {
+  typedef T type; };
+
+// Specified by TR1 [4.6] Relationships between types
+template<typename T, typename U> struct is_same : public false_type { };
+template<typename T> struct is_same<T, T> : public true_type { };
+
+// Specified by TR1 [4.6] Relationships between types
+#if !(defined(__GNUC__) && __GNUC__ <= 3)
+namespace type_traits_internal {
+
+// This class is an implementation detail for is_convertible, and you
+// don't need to know how it works to use is_convertible. For those
+// who care: we declare two different functions, one whose argument is
+// of type To and one with a variadic argument list. We give them
+// return types of different size, so we can use sizeof to trick the
+// compiler into telling us which function it would have chosen if we
+// had called it with an argument of type From.  See Alexandrescu's
+// _Modern C++ Design_ for more details on this sort of trick.
+
+template <typename From, typename To>
+struct ConvertHelper {
+  static small_ Test(To);
+  static big_ Test(...);
+  static From Create();
+  enum {
+    value = sizeof(Test(Create())) == sizeof(small_)
+  };
+};
+}  // namespace type_traits_internal
+
+// Inherits from true_type if From is convertible to To, false_type otherwise.
+template <typename From, typename To>
+struct is_convertible
+    : integral_constant<bool,
+                        type_traits_internal::ConvertHelper<From, To>::value> {
+};
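+
+// Illustrative sketch ("Base"/"Derived" stand for a hypothetical public
+// inheritance pair, as exercised in type_traits_unittest.cc):
+//
+//   // is_convertible<int, long>::value == true
+//   // is_convertible<Derived*, Base*>::value == true
+//   // is_convertible<Base*, Derived*>::value == false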
+#endif
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_TYPE_TRAITS_H_
diff --git a/src/google/protobuf/stubs/type_traits_unittest.cc b/src/google/protobuf/stubs/type_traits_unittest.cc
new file mode 100644
index 0000000..49c10ac
--- /dev/null
+++ b/src/google/protobuf/stubs/type_traits_unittest.cc
@@ -0,0 +1,631 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ----
+// Author: Matt Austern
+
+#include <google/protobuf/stubs/type_traits.h>
+
+#include <stdlib.h>   // for exit()
+#include <stdio.h>
+#include <string>
+#include <vector>
+
+#include <google/protobuf/testing/googletest.h>
+#include <gtest/gtest.h>
+
+typedef int int32;
+// IBM AIX typedefs `int64` in `sys/inttypes.h`, included transitively above.
+#ifndef _AIX
+typedef long int64;
+#endif
+
+using std::string;
+using std::vector;
+using std::pair;
+
+
+// This assertion produces errors like "error: invalid use of
+// incomplete type 'struct <unnamed>::AssertTypesEq<const int, int>'"
+// when it fails.
+template<typename T, typename U> struct AssertTypesEq;
+template<typename T> struct AssertTypesEq<T, T> {};
+#define COMPILE_ASSERT_TYPES_EQ(T, U) static_cast<void>(AssertTypesEq<T, U>())
+
+// A user-defined POD type.
+struct A {
+  int n_;
+};
+
+// A user-defined non-POD type with a trivial copy constructor.
+class B {
+ public:
+  explicit B(int n) : n_(n) { }
+ private:
+  int n_;
+};
+
+// Another user-defined non-POD type with a trivial copy constructor.
+// We will explicitly declare C to have a trivial copy constructor
+// by specializing has_trivial_copy.
+class C {
+ public:
+  explicit C(int n) : n_(n) { }
+ private:
+  int n_;
+};
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template<> struct has_trivial_copy<C> : true_type { };
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+// Another user-defined non-POD type with a trivial assignment operator.
+// We will explicitly declare D to have a trivial assignment operator
+// by specializing has_trivial_assign.
+class D {
+ public:
+  explicit D(int n) : n_(n) { }
+ private:
+  int n_;
+};
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template<> struct has_trivial_assign<D> : true_type { };
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+// Another user-defined non-POD type with a trivial constructor.
+// We will explicitly declare E to have a trivial constructor
+// by specializing has_trivial_constructor.
+class E {
+ public:
+  int n_;
+};
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template<> struct has_trivial_constructor<E> : true_type { };
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+// Another user-defined non-POD type with a trivial destructor.
+// We will explicitly declare F to have a trivial destructor
+// by specializing has_trivial_destructor.
+class F {
+ public:
+  explicit F(int n) : n_(n) { }
+ private:
+  int n_;
+};
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template<> struct has_trivial_destructor<F> : true_type { };
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+enum G {};
+
+union H {};
+
+class I {
+ public:
+  operator int() const;
+};
+
+class J {
+ private:
+  operator int() const;
+};
+
+namespace google {
+namespace protobuf {
+namespace internal {
+namespace {
+
+// A base class and a derived class that inherits from it, used for
+// testing conversion type traits.
+class Base {
+ public:
+  virtual ~Base() { }
+};
+
+class Derived : public Base {
+};
+
+TEST(TypeTraitsTest, TestIsInteger) {
+  // Verify that is_integral is true for all integer types.
+  EXPECT_TRUE(is_integral<bool>::value);
+  EXPECT_TRUE(is_integral<char>::value);
+  EXPECT_TRUE(is_integral<unsigned char>::value);
+  EXPECT_TRUE(is_integral<signed char>::value);
+  EXPECT_TRUE(is_integral<wchar_t>::value);
+  EXPECT_TRUE(is_integral<int>::value);
+  EXPECT_TRUE(is_integral<unsigned int>::value);
+  EXPECT_TRUE(is_integral<short>::value);
+  EXPECT_TRUE(is_integral<unsigned short>::value);
+  EXPECT_TRUE(is_integral<long>::value);
+  EXPECT_TRUE(is_integral<unsigned long>::value);
+
+  // Verify that is_integral is false for a few non-integer types.
+  EXPECT_FALSE(is_integral<void>::value);
+  EXPECT_FALSE(is_integral<float>::value);
+  EXPECT_FALSE(is_integral<string>::value);
+  EXPECT_FALSE(is_integral<int*>::value);
+  EXPECT_FALSE(is_integral<A>::value);
+  EXPECT_FALSE((is_integral<pair<int, int> >::value));
+
+  // Verify that cv-qualified integral types are still integral, and
+  // cv-qualified non-integral types are still non-integral.
+  EXPECT_TRUE(is_integral<const char>::value);
+  EXPECT_TRUE(is_integral<volatile bool>::value);
+  EXPECT_TRUE(is_integral<const volatile unsigned int>::value);
+  EXPECT_FALSE(is_integral<const float>::value);
+  EXPECT_FALSE(is_integral<int* volatile>::value);
+  EXPECT_FALSE(is_integral<const volatile string>::value);
+}
+
+TEST(TypeTraitsTest, TestIsFloating) {
+  // Verify that is_floating_point is true for all floating-point types.
+  EXPECT_TRUE(is_floating_point<float>::value);
+  EXPECT_TRUE(is_floating_point<double>::value);
+  EXPECT_TRUE(is_floating_point<long double>::value);
+
+  // Verify that is_floating_point is false for a few non-float types.
+  EXPECT_FALSE(is_floating_point<void>::value);
+  EXPECT_FALSE(is_floating_point<long>::value);
+  EXPECT_FALSE(is_floating_point<string>::value);
+  EXPECT_FALSE(is_floating_point<float*>::value);
+  EXPECT_FALSE(is_floating_point<A>::value);
+  EXPECT_FALSE((is_floating_point<pair<int, int> >::value));
+
+  // Verify that cv-qualified floating point types are still floating, and
+  // cv-qualified non-floating types are still non-floating.
+  EXPECT_TRUE(is_floating_point<const float>::value);
+  EXPECT_TRUE(is_floating_point<volatile double>::value);
+  EXPECT_TRUE(is_floating_point<const volatile long double>::value);
+  EXPECT_FALSE(is_floating_point<const int>::value);
+  EXPECT_FALSE(is_floating_point<volatile string>::value);
+  EXPECT_FALSE(is_floating_point<const volatile char>::value);
+}
+
+TEST(TypeTraitsTest, TestIsPointer) {
+  // Verify that is_pointer is true for some pointer types.
+  EXPECT_TRUE(is_pointer<int*>::value);
+  EXPECT_TRUE(is_pointer<void*>::value);
+  EXPECT_TRUE(is_pointer<string*>::value);
+  EXPECT_TRUE(is_pointer<const void*>::value);
+  EXPECT_TRUE(is_pointer<volatile float* const*>::value);
+
+  // Verify that is_pointer is false for some non-pointer types.
+  EXPECT_FALSE(is_pointer<void>::value);
+  EXPECT_FALSE(is_pointer<float&>::value);
+  EXPECT_FALSE(is_pointer<long>::value);
+  EXPECT_FALSE(is_pointer<vector<int*> >::value);
+  EXPECT_FALSE(is_pointer<int[5]>::value);
+
+  // A function pointer is a pointer, but a function type, or a function
+  // reference type, is not.
+  EXPECT_TRUE(is_pointer<int (*)(int x)>::value);
+  EXPECT_FALSE(is_pointer<void(char x)>::value);
+  EXPECT_FALSE(is_pointer<double (&)(string x)>::value);
+
+  // Verify that is_pointer<T> is true for some cv-qualified pointer types,
+  // and false for some cv-qualified non-pointer types.
+  EXPECT_TRUE(is_pointer<int* const>::value);
+  EXPECT_TRUE(is_pointer<const void* volatile>::value);
+  EXPECT_TRUE(is_pointer<char** const volatile>::value);
+  EXPECT_FALSE(is_pointer<const int>::value);
+  EXPECT_FALSE(is_pointer<volatile vector<int*> >::value);
+  EXPECT_FALSE(is_pointer<const volatile double>::value);
+}
+
+TEST(TypeTraitsTest, TestIsEnum) {
+// is_enum isn't supported on MSVC or gcc 3.x
+#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
+  // Verify that is_enum is true for enum types.
+  EXPECT_TRUE(is_enum<G>::value);
+  EXPECT_TRUE(is_enum<const G>::value);
+  EXPECT_TRUE(is_enum<volatile G>::value);
+  EXPECT_TRUE(is_enum<const volatile G>::value);
+
+  // Verify that is_enum is false for a few non-enum types.
+  EXPECT_FALSE(is_enum<void>::value);
+  EXPECT_FALSE(is_enum<G&>::value);
+  EXPECT_FALSE(is_enum<G[1]>::value);
+  EXPECT_FALSE(is_enum<const G[1]>::value);
+  EXPECT_FALSE(is_enum<G[]>::value);
+  EXPECT_FALSE(is_enum<int>::value);
+  EXPECT_FALSE(is_enum<float>::value);
+  EXPECT_FALSE(is_enum<A>::value);
+  EXPECT_FALSE(is_enum<A*>::value);
+  EXPECT_FALSE(is_enum<const A>::value);
+  EXPECT_FALSE(is_enum<H>::value);
+  EXPECT_FALSE(is_enum<I>::value);
+  EXPECT_FALSE(is_enum<J>::value);
+  EXPECT_FALSE(is_enum<void()>::value);
+  EXPECT_FALSE(is_enum<void(*)()>::value);
+  EXPECT_FALSE(is_enum<int A::*>::value);
+  EXPECT_FALSE(is_enum<void (A::*)()>::value);
+#endif
+}
+
+TEST(TypeTraitsTest, TestIsReference) {
+  // Verifies that is_reference is true for all reference types.
+  typedef float& RefFloat;
+  EXPECT_TRUE(is_reference<float&>::value);
+  EXPECT_TRUE(is_reference<const int&>::value);
+  EXPECT_TRUE(is_reference<const int*&>::value);
+  EXPECT_TRUE(is_reference<int (&)(bool)>::value);
+  EXPECT_TRUE(is_reference<RefFloat>::value);
+  EXPECT_TRUE(is_reference<const RefFloat>::value);
+  EXPECT_TRUE(is_reference<volatile RefFloat>::value);
+  EXPECT_TRUE(is_reference<const volatile RefFloat>::value);
+
+
+  // Verifies that is_reference is false for all non-reference types.
+  EXPECT_FALSE(is_reference<float>::value);
+  EXPECT_FALSE(is_reference<const float>::value);
+  EXPECT_FALSE(is_reference<volatile float>::value);
+  EXPECT_FALSE(is_reference<const volatile float>::value);
+  EXPECT_FALSE(is_reference<const int*>::value);
+  EXPECT_FALSE(is_reference<int()>::value);
+  EXPECT_FALSE(is_reference<void(*)(const char&)>::value);
+}
+
+TEST(TypeTraitsTest, TestAddReference) {
+  COMPILE_ASSERT_TYPES_EQ(int&, add_reference<int>::type);
+  COMPILE_ASSERT_TYPES_EQ(const int&, add_reference<const int>::type);
+  COMPILE_ASSERT_TYPES_EQ(volatile int&,
+                          add_reference<volatile int>::type);
+  COMPILE_ASSERT_TYPES_EQ(const volatile int&,
+                          add_reference<const volatile int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int&, add_reference<int&>::type);
+  COMPILE_ASSERT_TYPES_EQ(const int&, add_reference<const int&>::type);
+  COMPILE_ASSERT_TYPES_EQ(volatile int&,
+                          add_reference<volatile int&>::type);
+  COMPILE_ASSERT_TYPES_EQ(const volatile int&,
+                          add_reference<const volatile int&>::type);
+}
+
+TEST(TypeTraitsTest, TestIsPod) {
+  // Verify that arithmetic types and pointers are marked as PODs.
+  EXPECT_TRUE(is_pod<bool>::value);
+  EXPECT_TRUE(is_pod<char>::value);
+  EXPECT_TRUE(is_pod<unsigned char>::value);
+  EXPECT_TRUE(is_pod<signed char>::value);
+  EXPECT_TRUE(is_pod<wchar_t>::value);
+  EXPECT_TRUE(is_pod<int>::value);
+  EXPECT_TRUE(is_pod<unsigned int>::value);
+  EXPECT_TRUE(is_pod<short>::value);
+  EXPECT_TRUE(is_pod<unsigned short>::value);
+  EXPECT_TRUE(is_pod<long>::value);
+  EXPECT_TRUE(is_pod<unsigned long>::value);
+  EXPECT_TRUE(is_pod<float>::value);
+  EXPECT_TRUE(is_pod<double>::value);
+  EXPECT_TRUE(is_pod<long double>::value);
+  EXPECT_TRUE(is_pod<string*>::value);
+  EXPECT_TRUE(is_pod<A*>::value);
+  EXPECT_TRUE(is_pod<const B*>::value);
+  EXPECT_TRUE(is_pod<C**>::value);
+  EXPECT_TRUE(is_pod<const int>::value);
+  EXPECT_TRUE(is_pod<char* volatile>::value);
+  EXPECT_TRUE(is_pod<const volatile double>::value);
+#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
+  EXPECT_TRUE(is_pod<G>::value);
+  EXPECT_TRUE(is_pod<const G>::value);
+  EXPECT_TRUE(is_pod<volatile G>::value);
+  EXPECT_TRUE(is_pod<const volatile G>::value);
+#endif
+
+  // Verify that some non-POD types are not marked as PODs.
+  EXPECT_FALSE(is_pod<void>::value);
+  EXPECT_FALSE(is_pod<string>::value);
+  EXPECT_FALSE((is_pod<pair<int, int> >::value));
+  EXPECT_FALSE(is_pod<A>::value);
+  EXPECT_FALSE(is_pod<B>::value);
+  EXPECT_FALSE(is_pod<C>::value);
+  EXPECT_FALSE(is_pod<const string>::value);
+  EXPECT_FALSE(is_pod<volatile A>::value);
+  EXPECT_FALSE(is_pod<const volatile B>::value);
+}
+
+TEST(TypeTraitsTest, TestHasTrivialConstructor) {
+  // Verify that arithmetic types and pointers have trivial constructors.
+  EXPECT_TRUE(has_trivial_constructor<bool>::value);
+  EXPECT_TRUE(has_trivial_constructor<char>::value);
+  EXPECT_TRUE(has_trivial_constructor<unsigned char>::value);
+  EXPECT_TRUE(has_trivial_constructor<signed char>::value);
+  EXPECT_TRUE(has_trivial_constructor<wchar_t>::value);
+  EXPECT_TRUE(has_trivial_constructor<int>::value);
+  EXPECT_TRUE(has_trivial_constructor<unsigned int>::value);
+  EXPECT_TRUE(has_trivial_constructor<short>::value);
+  EXPECT_TRUE(has_trivial_constructor<unsigned short>::value);
+  EXPECT_TRUE(has_trivial_constructor<long>::value);
+  EXPECT_TRUE(has_trivial_constructor<unsigned long>::value);
+  EXPECT_TRUE(has_trivial_constructor<float>::value);
+  EXPECT_TRUE(has_trivial_constructor<double>::value);
+  EXPECT_TRUE(has_trivial_constructor<long double>::value);
+  EXPECT_TRUE(has_trivial_constructor<string*>::value);
+  EXPECT_TRUE(has_trivial_constructor<A*>::value);
+  EXPECT_TRUE(has_trivial_constructor<const B*>::value);
+  EXPECT_TRUE(has_trivial_constructor<C**>::value);
+
+  // Verify that pairs and arrays of such types have trivial
+  // constructors.
+  typedef int int10[10];
+  EXPECT_TRUE((has_trivial_constructor<pair<int, char*> >::value));
+  EXPECT_TRUE(has_trivial_constructor<int10>::value);
+
+  // Verify that pairs of types without trivial constructors
+  // are not marked as trivial.
+  EXPECT_FALSE((has_trivial_constructor<pair<int, string> >::value));
+  EXPECT_FALSE((has_trivial_constructor<pair<string, int> >::value));
+
+  // Verify that types without trivial constructors are
+  // correctly marked as such.
+  EXPECT_FALSE(has_trivial_constructor<string>::value);
+  EXPECT_FALSE(has_trivial_constructor<vector<int> >::value);
+
+  // Verify that E, which we have declared to have a trivial
+  // constructor, is correctly marked as such.
+  EXPECT_TRUE(has_trivial_constructor<E>::value);
+}
+
+TEST(TypeTraitsTest, TestHasTrivialCopy) {
+  // Verify that arithmetic types and pointers have trivial copy
+  // constructors.
+  EXPECT_TRUE(has_trivial_copy<bool>::value);
+  EXPECT_TRUE(has_trivial_copy<char>::value);
+  EXPECT_TRUE(has_trivial_copy<unsigned char>::value);
+  EXPECT_TRUE(has_trivial_copy<signed char>::value);
+  EXPECT_TRUE(has_trivial_copy<wchar_t>::value);
+  EXPECT_TRUE(has_trivial_copy<int>::value);
+  EXPECT_TRUE(has_trivial_copy<unsigned int>::value);
+  EXPECT_TRUE(has_trivial_copy<short>::value);
+  EXPECT_TRUE(has_trivial_copy<unsigned short>::value);
+  EXPECT_TRUE(has_trivial_copy<long>::value);
+  EXPECT_TRUE(has_trivial_copy<unsigned long>::value);
+  EXPECT_TRUE(has_trivial_copy<float>::value);
+  EXPECT_TRUE(has_trivial_copy<double>::value);
+  EXPECT_TRUE(has_trivial_copy<long double>::value);
+  EXPECT_TRUE(has_trivial_copy<string*>::value);
+  EXPECT_TRUE(has_trivial_copy<A*>::value);
+  EXPECT_TRUE(has_trivial_copy<const B*>::value);
+  EXPECT_TRUE(has_trivial_copy<C**>::value);
+
+  // Verify that pairs and arrays of such types have trivial
+  // copy constructors.
+  typedef int int10[10];
+  EXPECT_TRUE((has_trivial_copy<pair<int, char*> >::value));
+  EXPECT_TRUE(has_trivial_copy<int10>::value);
+
+  // Verify that pairs of types without trivial copy constructors
+  // are not marked as trivial.
+  EXPECT_FALSE((has_trivial_copy<pair<int, string> >::value));
+  EXPECT_FALSE((has_trivial_copy<pair<string, int> >::value));
+
+  // Verify that types without trivial copy constructors are
+  // correctly marked as such.
+  EXPECT_FALSE(has_trivial_copy<string>::value);
+  EXPECT_FALSE(has_trivial_copy<vector<int> >::value);
+
+  // Verify that C, which we have declared to have a trivial
+  // copy constructor, is correctly marked as such.
+  EXPECT_TRUE(has_trivial_copy<C>::value);
+}
+
+TEST(TypeTraitsTest, TestHasTrivialAssign) {
+  // Verify that arithmetic types and pointers have trivial assignment
+  // operators.
+  EXPECT_TRUE(has_trivial_assign<bool>::value);
+  EXPECT_TRUE(has_trivial_assign<char>::value);
+  EXPECT_TRUE(has_trivial_assign<unsigned char>::value);
+  EXPECT_TRUE(has_trivial_assign<signed char>::value);
+  EXPECT_TRUE(has_trivial_assign<wchar_t>::value);
+  EXPECT_TRUE(has_trivial_assign<int>::value);
+  EXPECT_TRUE(has_trivial_assign<unsigned int>::value);
+  EXPECT_TRUE(has_trivial_assign<short>::value);
+  EXPECT_TRUE(has_trivial_assign<unsigned short>::value);
+  EXPECT_TRUE(has_trivial_assign<long>::value);
+  EXPECT_TRUE(has_trivial_assign<unsigned long>::value);
+  EXPECT_TRUE(has_trivial_assign<float>::value);
+  EXPECT_TRUE(has_trivial_assign<double>::value);
+  EXPECT_TRUE(has_trivial_assign<long double>::value);
+  EXPECT_TRUE(has_trivial_assign<string*>::value);
+  EXPECT_TRUE(has_trivial_assign<A*>::value);
+  EXPECT_TRUE(has_trivial_assign<const B*>::value);
+  EXPECT_TRUE(has_trivial_assign<C**>::value);
+
+  // Verify that pairs and arrays of such types have trivial
+  // assignment operators.
+  typedef int int10[10];
+  EXPECT_TRUE((has_trivial_assign<pair<int, char*> >::value));
+  EXPECT_TRUE(has_trivial_assign<int10>::value);
+
+  // Verify that pairs of types without trivial assignment operators
+  // are not marked as trivial.
+  EXPECT_FALSE((has_trivial_assign<pair<int, string> >::value));
+  EXPECT_FALSE((has_trivial_assign<pair<string, int> >::value));
+
+  // Verify that types without trivial assignment operators are
+  // correctly marked as such.
+  EXPECT_FALSE(has_trivial_assign<string>::value);
+  EXPECT_FALSE(has_trivial_assign<vector<int> >::value);
+
+  // Verify that D, which we have declared to have a trivial
+  // assignment operator, is correctly marked as such.
+  EXPECT_TRUE(has_trivial_assign<D>::value);
+}
+
+TEST(TypeTraitsTest, TestHasTrivialDestructor) {
+  // Verify that arithmetic types and pointers have trivial destructors.
+  EXPECT_TRUE(has_trivial_destructor<bool>::value);
+  EXPECT_TRUE(has_trivial_destructor<char>::value);
+  EXPECT_TRUE(has_trivial_destructor<unsigned char>::value);
+  EXPECT_TRUE(has_trivial_destructor<signed char>::value);
+  EXPECT_TRUE(has_trivial_destructor<wchar_t>::value);
+  EXPECT_TRUE(has_trivial_destructor<int>::value);
+  EXPECT_TRUE(has_trivial_destructor<unsigned int>::value);
+  EXPECT_TRUE(has_trivial_destructor<short>::value);
+  EXPECT_TRUE(has_trivial_destructor<unsigned short>::value);
+  EXPECT_TRUE(has_trivial_destructor<long>::value);
+  EXPECT_TRUE(has_trivial_destructor<unsigned long>::value);
+  EXPECT_TRUE(has_trivial_destructor<float>::value);
+  EXPECT_TRUE(has_trivial_destructor<double>::value);
+  EXPECT_TRUE(has_trivial_destructor<long double>::value);
+  EXPECT_TRUE(has_trivial_destructor<string*>::value);
+  EXPECT_TRUE(has_trivial_destructor<A*>::value);
+  EXPECT_TRUE(has_trivial_destructor<const B*>::value);
+  EXPECT_TRUE(has_trivial_destructor<C**>::value);
+
+  // Verify that pairs and arrays of such types have trivial
+  // destructors.
+  typedef int int10[10];
+  EXPECT_TRUE((has_trivial_destructor<pair<int, char*> >::value));
+  EXPECT_TRUE(has_trivial_destructor<int10>::value);
+
+  // Verify that pairs of types without trivial destructors
+  // are not marked as trivial.
+  EXPECT_FALSE((has_trivial_destructor<pair<int, string> >::value));
+  EXPECT_FALSE((has_trivial_destructor<pair<string, int> >::value));
+
+  // Verify that types without trivial destructors are
+  // correctly marked as such.
+  EXPECT_FALSE(has_trivial_destructor<string>::value);
+  EXPECT_FALSE(has_trivial_destructor<vector<int> >::value);
+
+  // Verify that F, which we have declared to have a trivial
+  // destructor, is correctly marked as such.
+  EXPECT_TRUE(has_trivial_destructor<F>::value);
+}
+
+// Tests remove_pointer.
+TEST(TypeTraitsTest, TestRemovePointer) {
+  COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int*>::type);
+  COMPILE_ASSERT_TYPES_EQ(const int, remove_pointer<const int*>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int* const>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int* volatile>::type);
+}
+
+TEST(TypeTraitsTest, TestRemoveConst) {
+  COMPILE_ASSERT_TYPES_EQ(int, remove_const<int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_const<const int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int *, remove_const<int * const>::type);
+  // TR1 examples.
+  COMPILE_ASSERT_TYPES_EQ(const int *, remove_const<const int *>::type);
+  COMPILE_ASSERT_TYPES_EQ(volatile int,
+                          remove_const<const volatile int>::type);
+}
+
+TEST(TypeTraitsTest, TestRemoveVolatile) {
+  COMPILE_ASSERT_TYPES_EQ(int, remove_volatile<int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_volatile<volatile int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int *, remove_volatile<int * volatile>::type);
+  // TR1 examples.
+  COMPILE_ASSERT_TYPES_EQ(volatile int *,
+                          remove_volatile<volatile int *>::type);
+  COMPILE_ASSERT_TYPES_EQ(const int,
+                          remove_volatile<const volatile int>::type);
+}
+
+TEST(TypeTraitsTest, TestRemoveCV) {
+  COMPILE_ASSERT_TYPES_EQ(int, remove_cv<int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_cv<volatile int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_cv<const int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int *, remove_cv<int * const volatile>::type);
+  // TR1 examples.
+  COMPILE_ASSERT_TYPES_EQ(const volatile int *,
+                          remove_cv<const volatile int *>::type);
+  COMPILE_ASSERT_TYPES_EQ(int,
+                          remove_cv<const volatile int>::type);
+}
+
+TEST(TypeTraitsTest, TestRemoveReference) {
+  COMPILE_ASSERT_TYPES_EQ(int, remove_reference<int>::type);
+  COMPILE_ASSERT_TYPES_EQ(int, remove_reference<int&>::type);
+  COMPILE_ASSERT_TYPES_EQ(const int, remove_reference<const int&>::type);
+  COMPILE_ASSERT_TYPES_EQ(int*, remove_reference<int * &>::type);
+}
+
+TEST(TypeTraitsTest, TestIsSame) {
+  EXPECT_TRUE((is_same<int32, int32>::value));
+  EXPECT_FALSE((is_same<int32, int64>::value));
+  EXPECT_FALSE((is_same<int64, int32>::value));
+  EXPECT_FALSE((is_same<int, const int>::value));
+
+  EXPECT_TRUE((is_same<void, void>::value));
+  EXPECT_FALSE((is_same<void, int>::value));
+  EXPECT_FALSE((is_same<int, void>::value));
+
+  EXPECT_TRUE((is_same<int*, int*>::value));
+  EXPECT_TRUE((is_same<void*, void*>::value));
+  EXPECT_FALSE((is_same<int*, void*>::value));
+  EXPECT_FALSE((is_same<void*, int*>::value));
+  EXPECT_FALSE((is_same<void*, const void*>::value));
+  EXPECT_FALSE((is_same<void*, void* const>::value));
+
+  EXPECT_TRUE((is_same<Base*, Base*>::value));
+  EXPECT_TRUE((is_same<Derived*, Derived*>::value));
+  EXPECT_FALSE((is_same<Base*, Derived*>::value));
+  EXPECT_FALSE((is_same<Derived*, Base*>::value));
+}
+
+TEST(TypeTraitsTest, TestConvertible) {
+#if !(defined(__GNUC__) && __GNUC__ <= 3)
+  EXPECT_TRUE((is_convertible<int, int>::value));
+  EXPECT_TRUE((is_convertible<int, long>::value));
+  EXPECT_TRUE((is_convertible<long, int>::value));
+
+  EXPECT_TRUE((is_convertible<int*, void*>::value));
+  EXPECT_FALSE((is_convertible<void*, int*>::value));
+
+  EXPECT_TRUE((is_convertible<Derived*, Base*>::value));
+  EXPECT_FALSE((is_convertible<Base*, Derived*>::value));
+  EXPECT_TRUE((is_convertible<Derived*, const Base*>::value));
+  EXPECT_FALSE((is_convertible<const Derived*, Base*>::value));
+#endif
+}
+
+}  // anonymous namespace
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google