1// Copyright 2005 Google Inc. All Rights Reserved.
2//
3// Redistribution and use in source and binary forms, with or without
4// modification, are permitted provided that the following conditions are
5// met:
6//
7// * Redistributions of source code must retain the above copyright
8// notice, this list of conditions and the following disclaimer.
9// * Redistributions in binary form must reproduce the above
10// copyright notice, this list of conditions and the following disclaimer
11// in the documentation and/or other materials provided with the
12// distribution.
13// * Neither the name of Google Inc. nor the names of its
14// contributors may be used to endorse or promote products derived from
15// this software without specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29#include "snappy-internal.h"
30#include "snappy-sinksource.h"
31#include "snappy.h"
32
33#if !defined(SNAPPY_HAVE_BMI2)
34// __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
35// specifically, but it does define __AVX2__ when AVX2 support is available.
36// Fortunately, AVX2 was introduced in Haswell, just like BMI2.
37//
38// BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So,
39// GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which
40// case issuing BMI2 instructions results in a compiler error.
41#if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__))
42#define SNAPPY_HAVE_BMI2 1
43#else
44#define SNAPPY_HAVE_BMI2 0
45#endif
46#endif // !defined(SNAPPY_HAVE_BMI2)
47
48#if SNAPPY_HAVE_BMI2
49// Please do not replace with <x86intrin.h> or with headers that assume more
50// advanced SSE versions without checking with all the OWNERS.
51#include <immintrin.h>
52#endif
53
54#include <algorithm>
55#include <array>
56#include <cstddef>
57#include <cstdint>
58#include <cstdio>
59#include <cstring>
60#include <string>
61#include <utility>
62#include <vector>
63
64namespace snappy {
65
66namespace {
67
68// The amount of slop bytes writers are using for unconditional copies.
69constexpr int kSlopBytes = 64;
70
71using internal::char_table;
72using internal::COPY_1_BYTE_OFFSET;
73using internal::COPY_2_BYTE_OFFSET;
74using internal::COPY_4_BYTE_OFFSET;
75using internal::kMaximumTagLength;
76using internal::LITERAL;
77#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
78using internal::V128;
79using internal::V128_Load;
80using internal::V128_LoadU;
81using internal::V128_Shuffle;
82using internal::V128_StoreU;
83using internal::V128_DupChar;
84#endif
85
86// We translate the information encoded in a tag through a lookup table to a
87// format that requires fewer instructions to decode. Effectively we store
88// the length minus the tag part of the offset. The least significant byte
89// thus stores the length, while the full length - offset is given by
90// entry - ExtractOffset(type). The nice thing is that the subtraction
91// immediately sets the flags for the necessary check that offset >= length.
92// This folds the cmp into the sub. We engineer the long literals and copy-4 to
93// always fail this check, so their presence doesn't affect the fast path.
94// To prevent literals from triggering the guard against offset < length (offset
95// does not apply to literals), the table gives them a spurious offset of
96// 256.
97inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
98 return len - (offset << 8);
99}
100
101inline constexpr int16_t LengthMinusOffset(int data, int type) {
102 return type == 3 ? 0xFF // copy-4 (or type == 3)
103 : type == 2 ? MakeEntry(data + 1, 0) // copy-2
104 : type == 1 ? MakeEntry((data & 7) + 4, data >> 3) // copy-1
105 : data < 60 ? MakeEntry(data + 1, 1) // note spurious offset.
106 : 0xFF; // long literal
107}
108
109inline constexpr int16_t LengthMinusOffset(uint8_t tag) {
110 return LengthMinusOffset(tag >> 2, tag & 3);
111}
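// For illustration, consider the copy-1 tag byte 0x35 (type == 1, data == 13):
// it encodes length (13 & 7) + 4 == 9 and offset high bits 13 >> 3 == 1, so the
// table entry is MakeEntry(9, 1) == 9 - 256 == -247. A decoder recovers the
// length as entry & 0xFF == 9 and, with a following offset byte of 0x2A, the
// full offset as 1 * 256 + 42 == 298. A short literal such as tag 0x10
// (length 5) maps to MakeEntry(5, 1) == -251, i.e. the spurious offset of 256
// mentioned above.
static_assert(LengthMinusOffset(0x35) == -247, "worked example: copy-1 tag");
static_assert(LengthMinusOffset(0x10) == -251, "worked example: short literal");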
112
113template <size_t... Ints>
114struct index_sequence {};
115
116template <std::size_t N, size_t... Is>
117struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {};
118
119template <size_t... Is>
120struct make_index_sequence<0, Is...> : index_sequence<Is...> {};
121
122template <size_t... seq>
123constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
124 return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
125}
126
127alignas(64) const std::array<int16_t, 256> kLengthMinusOffset =
128 MakeTable(make_index_sequence<256>{});
129
130// Any hash function will produce a valid compressed bitstream, but a good
131// hash function reduces the number of collisions and thus yields better
132// compression for compressible input, and more speed for incompressible
133// input. Of course, it doesn't hurt if the hash function is reasonably fast
134// either, as it gets called a lot.
135inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) {
136 constexpr uint32_t kMagic = 0x1e35a7bd;
137 return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask;
138}
139
140} // namespace
141
142size_t MaxCompressedLength(size_t source_bytes) {
143 // Compressed data can be defined as:
144 // compressed := item* literal*
145 // item := literal* copy
146 //
147 // The trailing literal sequence has a space blowup of at most 62/60
148 // since a literal of length 60 needs one tag byte + one extra byte
149 // for length information.
150 //
151 // Item blowup is trickier to measure. Suppose the "copy" op copies
152 // 4 bytes of data. Because of a special check in the encoding code,
153 // we produce a 4-byte copy only if the offset is < 65536. Therefore
154 // the copy op takes 3 bytes to encode, and this type of item leads
155 // to at most the 62/60 blowup for representing literals.
156 //
157 // Suppose the "copy" op copies 5 bytes of data. If the offset is big
158 // enough, it will take 5 bytes to encode the copy op. Therefore the
159 // worst case here is a one-byte literal followed by a five-byte copy.
160 // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
161 //
162 // This last factor dominates the blowup, so the final estimate is:
163 return 32 + source_bytes + source_bytes / 6;
164}
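// For example, with source_bytes == 60000 this bound is
// 32 + 60000 + 60000 / 6 == 70032 bytes, i.e. roughly 17% worst-case expansion
// plus a small constant.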
165
166namespace {
167
168void UnalignedCopy64(const void* src, void* dst) {
169 char tmp[8];
170 std::memcpy(tmp, src, 8);
171 std::memcpy(dst, tmp, 8);
172}
173
174void UnalignedCopy128(const void* src, void* dst) {
175 // std::memcpy() gets vectorized when the appropriate compiler options are
176 // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2
177 // load and store.
178 char tmp[16];
179 std::memcpy(tmp, src, 16);
180 std::memcpy(dst, tmp, 16);
181}
182
183template <bool use_16bytes_chunk>
184inline void ConditionalUnalignedCopy128(const char* src, char* dst) {
185 if (use_16bytes_chunk) {
186 UnalignedCopy128(src, dst);
187 } else {
188 UnalignedCopy64(src, dst);
189 UnalignedCopy64(src + 8, dst + 8);
190 }
191}
192
193// Copy [src, src+(op_limit-op)) to [op, op_limit) a byte at a time. Used
194// for handling COPY operations where the input and output regions may overlap.
195// For example, suppose:
196// src == "ab"
197// op == src + 2
198// op_limit == op + 20
199// After IncrementalCopySlow(src, op, op_limit), the result will have eleven
200// copies of "ab"
201// ababababababababababab
202// Note that this does not match the semantics of either std::memcpy() or
203// std::memmove().
204inline char* IncrementalCopySlow(const char* src, char* op,
205 char* const op_limit) {
206 // TODO: Remove pragma when LLVM is aware this
207 // function is only called in cold regions and when cold regions don't get
208 // vectorized or unrolled.
209#ifdef __clang__
210#pragma clang loop unroll(disable)
211#endif
212 while (op < op_limit) {
213 *op++ = *src++;
214 }
215 return op_limit;
216}
217
218#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
219
220// Computes the bytes for shuffle control mask (please read comments on
221// 'pattern_generation_masks' as well) for the given index_offset and
222// pattern_size. For example, when the 'offset' is 6, it will generate a
223// repeating pattern of size 6. So, the first 16 byte indexes will correspond to
224// the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the
225// next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3,
226// 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by
227// calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and
228// MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively.
229template <size_t... indexes>
230inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
231 int index_offset, int pattern_size, index_sequence<indexes...>) {
232 return {static_cast<char>((index_offset + indexes) % pattern_size)...};
233}
234
235// Computes the shuffle control mask bytes array for given pattern-sizes and
236// returns an array.
237template <size_t... pattern_sizes_minus_one>
238inline constexpr std::array<std::array<char, sizeof(V128)>,
239 sizeof...(pattern_sizes_minus_one)>
240MakePatternMaskBytesTable(int index_offset,
241 index_sequence<pattern_sizes_minus_one...>) {
242 return {
243 MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1,
244 make_index_sequence</*indexes=*/sizeof(V128)>())...};
245}
246
247// This is an array of shuffle control masks that can be used as the source
248// operand for PSHUFB to permute the contents of the destination XMM register
249// into a repeating byte pattern.
250alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
251 16> pattern_generation_masks =
252 MakePatternMaskBytesTable(
253 /*index_offset=*/0,
254 /*pattern_sizes_minus_one=*/make_index_sequence<16>());
255
256// Similar to 'pattern_generation_masks', this table is used to "rotate" the
257// pattern so that we can copy the *next 16 bytes* consistent with the pattern.
258// Basically, pattern_reshuffle_masks is a continuation of
259// pattern_generation_masks. It follows that, pattern_reshuffle_masks is same as
260// pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
261alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
262 16> pattern_reshuffle_masks =
263 MakePatternMaskBytesTable(
264 /*index_offset=*/16,
265 /*pattern_sizes_minus_one=*/make_index_sequence<16>());
266
267SNAPPY_ATTRIBUTE_ALWAYS_INLINE
268static inline V128 LoadPattern(const char* src, const size_t pattern_size) {
269 V128 generation_mask = V128_Load(reinterpret_cast<const V128*>(
270 pattern_generation_masks[pattern_size - 1].data()));
271 // Uninitialized bytes are masked out by the shuffle mask.
272 // TODO: remove annotation and macro defs once MSan is fixed.
273 SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
274 return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)),
275 generation_mask);
276}
277
278SNAPPY_ATTRIBUTE_ALWAYS_INLINE
279static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */>
280LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
281 V128 pattern = LoadPattern(src, pattern_size);
282
283 // This mask will generate the next 16 bytes in-place. Doing so enables us to
284 // write data by at most 4 V128_StoreU.
285 //
286 // For example, suppose pattern is: abcdefabcdefabcd
287 // Shuffling with this mask will generate: efabcdefabcdefab
288 // Shuffling again will generate: cdefabcdefabcdef
289 V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>(
290 pattern_reshuffle_masks[pattern_size - 1].data()));
291 return {pattern, reshuffle_mask};
292}
293
294#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
295
296// Fallback for when we need to copy while extending the pattern, for example
297// copying 10 bytes from 3 positions back abc -> abcabcabcabca.
298//
299// REQUIRES: [dst - offset, dst + 64) is a valid address range.
300SNAPPY_ATTRIBUTE_ALWAYS_INLINE
301static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
302#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
303 if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
304 switch (offset) {
305 case 0:
306 return false;
307 case 1: {
308 // TODO: Ideally we should memset, move back once the
309 // codegen issues are fixed.
310 V128 pattern = V128_DupChar(dst[-1]);
311 for (int i = 0; i < 4; i++) {
312 V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
313 }
314 return true;
315 }
316 case 2:
317 case 4:
318 case 8:
319 case 16: {
320 V128 pattern = LoadPattern(dst - offset, offset);
321 for (int i = 0; i < 4; i++) {
322 V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
323 }
324 return true;
325 }
326 default: {
327 auto pattern_and_reshuffle_mask =
328 LoadPatternAndReshuffleMask(dst - offset, offset);
329 V128 pattern = pattern_and_reshuffle_mask.first;
330 V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
331 for (int i = 0; i < 4; i++) {
332 V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
333 pattern = V128_Shuffle(pattern, reshuffle_mask);
334 }
335 return true;
336 }
337 }
338 }
339#else
340 if (SNAPPY_PREDICT_TRUE(offset < 16)) {
341 if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
342 // Extend the pattern to the first 16 bytes.
343 for (int i = 0; i < 16; i++) dst[i] = dst[i - offset];
344 // Find a multiple of pattern >= 16.
345 static std::array<uint8_t, 16> pattern_sizes = []() {
346 std::array<uint8_t, 16> res;
347 for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i;
348 return res;
349 }();
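    // For illustration: with an original offset of 3, res[3] == (16 / 3 + 1) * 3
    // == 18, a multiple of 3 that is >= 16. The block copies below then read 18
    // bytes back, which stays inside bytes that are already pattern-consistent
    // and, because 18 is a multiple of the pattern size, keeps extending the
    // same repeating pattern.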
350 offset = pattern_sizes[offset];
351 for (int i = 1; i < 4; i++) {
352 std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
353 }
354 return true;
355 }
356#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
357
358 // Very rare.
359 for (int i = 0; i < 4; i++) {
360 std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
361 }
362 return true;
363}
364
365// Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
366// IncrementalCopySlow. buf_limit is the address past the end of the writable
367// region of the buffer.
368inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
369 char* const buf_limit) {
370#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
371 constexpr int big_pattern_size_lower_bound = 16;
372#else
373 constexpr int big_pattern_size_lower_bound = 8;
374#endif
375
376 // Terminology:
377 //
378 // slop = buf_limit - op
379 // pat = op - src
380 // len = op_limit - op
381 assert(src < op);
382 assert(op < op_limit);
383 assert(op_limit <= buf_limit);
384 // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64.
385 assert(op_limit - op <= 64);
386 // NOTE: In practice the compressor always emits len >= 4, so it is ok to
387 // assume that to optimize this function, but this is not guaranteed by the
388 // compression format, so we have to also handle len < 4 in case the input
389 // does not satisfy these conditions.
390
391 size_t pattern_size = op - src;
392 // The cases are split into different branches to allow the branch predictor,
393 // FDO, and static prediction hints to work better. For each input we list the
394 // ratio of invocations that match each condition.
395 //
396 // input slop < 16 pat < 8 len > 16
397 // ------------------------------------------
398 // html|html4|cp 0% 1.01% 27.73%
399 // urls 0% 0.88% 14.79%
400 // jpg 0% 64.29% 7.14%
401 // pdf 0% 2.56% 58.06%
402 // txt[1-4] 0% 0.23% 0.97%
403 // pb 0% 0.96% 13.88%
404 // bin 0.01% 22.27% 41.17%
405 //
406 // It is very rare that we don't have enough slop for doing block copies. It
407 // is also rare that we need to expand a pattern. Small patterns are common
408 // for incompressible formats and for those we are plenty fast already.
409 // Lengths are normally not greater than 16 but they vary depending on the
410 // input. In general if we always predict len <= 16 it would be an ok
411 // prediction.
412 //
413 // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE)
414 // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a
415 // time.
416
417 // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
418 // bytes.
419 if (pattern_size < big_pattern_size_lower_bound) {
420#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
421 // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
422 // to permute the register's contents in-place into a repeating sequence of
423 // the first "pattern_size" bytes.
424 // For example, suppose:
425 // src == "abc"
426// op == src + 3
427 // After V128_Shuffle(), "pattern" will have five copies of "abc"
428 // followed by one byte of slop: abcabcabcabcabca.
429 //
430 // The non-SSE fallback implementation suffers from store-forwarding stalls
431 // because its loads and stores partly overlap. By expanding the pattern
432 // in-place, we avoid the penalty.
433
434 // Typically, the op_limit is the gating factor so try to simplify the loop
435 // based on that.
436 if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
437 auto pattern_and_reshuffle_mask =
438 LoadPatternAndReshuffleMask(src, pattern_size);
439 V128 pattern = pattern_and_reshuffle_mask.first;
440 V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
441
442 // There is at least one 16-byte block, and at most four. Writing four
443 // conditionals instead of a loop allows FDO to lay out the code with
444 // respect to the actual probabilities of each length.
445 // TODO: Replace with loop with trip count hint.
446 V128_StoreU(reinterpret_cast<V128*>(op), pattern);
447
448 if (op + 16 < op_limit) {
449 pattern = V128_Shuffle(pattern, reshuffle_mask);
450 V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern);
451 }
452 if (op + 32 < op_limit) {
453 pattern = V128_Shuffle(pattern, reshuffle_mask);
454 V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern);
455 }
456 if (op + 48 < op_limit) {
457 pattern = V128_Shuffle(pattern, reshuffle_mask);
458 V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern);
459 }
460 return op_limit;
461 }
462 char* const op_end = buf_limit - 15;
463 if (SNAPPY_PREDICT_TRUE(op < op_end)) {
464 auto pattern_and_reshuffle_mask =
465 LoadPatternAndReshuffleMask(src, pattern_size);
466 V128 pattern = pattern_and_reshuffle_mask.first;
467 V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
468
469 // This code path is relatively cold however so we save code size
470 // by avoiding unrolling and vectorizing.
471 //
472// TODO: Remove pragma when cold regions don't get
473 // vectorized or unrolled.
474#ifdef __clang__
475#pragma clang loop unroll(disable)
476#endif
477 do {
478 V128_StoreU(reinterpret_cast<V128*>(op), pattern);
479 pattern = V128_Shuffle(pattern, reshuffle_mask);
480 op += 16;
481 } while (SNAPPY_PREDICT_TRUE(op < op_end));
482 }
483 return IncrementalCopySlow(op - pattern_size, op, op_limit);
484#else // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
485 // If plenty of buffer space remains, expand the pattern to at least 8
486 // bytes. The way the following loop is written, we need 8 bytes of buffer
487 // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
488 // bytes if pattern_size is 2. Precisely encoding that is probably not
489 // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
490 // (because 11 are required in the worst case).
491 if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
492 while (pattern_size < 8) {
493 UnalignedCopy64(src, op);
494 op += pattern_size;
495 pattern_size *= 2;
496 }
497 if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
498 } else {
499 return IncrementalCopySlow(src, op, op_limit);
500 }
501#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
502 }
503 assert(pattern_size >= big_pattern_size_lower_bound);
504 constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;
505
506 // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can
507 // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op.
508 // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes
509 // guarantees that op - src >= 8.
510 //
511 // Typically, the op_limit is the gating factor so try to simplify the loop
512 // based on that.
513 if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
514 // There is at least one 16-byte block, and at most four. Writing four
515 // conditionals instead of a loop allows FDO to lay out the code with respect
516 // to the actual probabilities of each length.
517 // TODO: Replace with loop with trip count hint.
518 ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
519 if (op + 16 < op_limit) {
520 ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16);
521 }
522 if (op + 32 < op_limit) {
523 ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32);
524 }
525 if (op + 48 < op_limit) {
526 ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48);
527 }
528 return op_limit;
529 }
530
531 // Fall back to doing as much as we can with the available slop in the
532 // buffer. This code path is relatively cold however so we save code size by
533 // avoiding unrolling and vectorizing.
534 //
535// TODO: Remove pragma when cold regions don't get vectorized
536 // or unrolled.
537#ifdef __clang__
538#pragma clang loop unroll(disable)
539#endif
540 for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
541 ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
542 }
543 if (op >= op_limit) return op_limit;
544
545 // We only take this branch if we didn't have enough slop and we can do a
546 // single 8 byte copy.
547 if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
548 UnalignedCopy64(src, op);
549 src += 8;
550 op += 8;
551 }
552 return IncrementalCopySlow(src, op, op_limit);
553}
554
555} // namespace
556
557template <bool allow_fast_path>
558static inline char* EmitLiteral(char* op, const char* literal, int len) {
559 // The vast majority of copies are below 16 bytes, for which a
560 // call to std::memcpy() is overkill. This fast path can sometimes
561 // copy up to 15 bytes too much, but that is okay in the
562 // main loop, since we have a bit to go on for both sides:
563 //
564 // - The input will always have kInputMarginBytes = 15 extra
565 // available bytes, as long as we're in the main loop, and
566 // if not, allow_fast_path = false.
567 // - The output will always have 32 spare bytes (see
568 // MaxCompressedLength).
569 assert(len > 0); // Zero-length literals are disallowed
570 int n = len - 1;
571 if (allow_fast_path && len <= 16) {
572 // Fits in tag byte
573 *op++ = LITERAL | (n << 2);
574
575 UnalignedCopy128(literal, op);
576 return op + len;
577 }
578
579 if (n < 60) {
580 // Fits in tag byte
581 *op++ = LITERAL | (n << 2);
582 } else {
583 int count = (Bits::Log2Floor(n) >> 3) + 1;
584 assert(count >= 1);
585 assert(count <= 4);
586 *op++ = LITERAL | ((59 + count) << 2);
587 // Encode in upcoming bytes.
588 // Write 4 bytes, though we may care about only 1 of them. The output buffer
589 // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds
590 // here and there is a std::memcpy() of size 'len' below.
591 LittleEndian::Store32(op, n);
592 op += count;
593 }
594 std::memcpy(op, literal, len);
595 return op + len;
596}
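// For illustration, emitting a literal of length 200 (too long for the fast
// path) costs two tag bytes: n == 199 >= 60, Bits::Log2Floor(199) == 7, so
// count == 1 and the tag byte is LITERAL | (60 << 2) == 0xF0, followed by the
// single length byte 0xC7 (199) and then the 200 literal bytes themselves.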
597
598template <bool len_less_than_12>
599static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
600 assert(len <= 64);
601 assert(len >= 4);
602 assert(offset < 65536);
603 assert(len_less_than_12 == (len < 12));
604
605 if (len_less_than_12) {
606 uint32_t u = (len << 2) + (offset << 8);
607 uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
608 uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
609 // It turns out that offset < 2048 is a difficult-to-predict branch.
610 // `perf record` shows this is the highest percentage of branch misses in
611 // benchmarks. This code is branch-free: the data dependency
612 // chain that bottlenecks the throughput is so long that a few extra
613 // instructions are completely free (IPC << 6 because of data deps).
614 u += offset < 2048 ? copy1 : copy2;
615 LittleEndian::Store32(op, u);
616 op += offset < 2048 ? 2 : 3;
617 } else {
618 // Write 4 bytes, though we only care about 3 of them. The output buffer
619 // is required to have some slack, so the extra byte won't overrun it.
620 uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
621 LittleEndian::Store32(op, u);
622 op += 3;
623 }
624 return op;
625}
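// For illustration, a copy of length 8 at offset 700 takes the copy-1 branch
// above (700 < 2048) and emits two bytes: 0x51 (tag type 01, length - 4 == 4 in
// bits 2..4, offset >> 8 == 2 in bits 5..7) and 0xBC (700 & 0xFF). The same
// length at offset 3000 uses the copy-2 branch: tag 0x1E (2 + (7 << 2))
// followed by the little-endian offset bytes 0xB8 0x0B.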
626
627template <bool len_less_than_12>
628static inline char* EmitCopy(char* op, size_t offset, size_t len) {
629 assert(len_less_than_12 == (len < 12));
630 if (len_less_than_12) {
631 return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
632 } else {
633 // A special case for len <= 64 might help, but so far measurements suggest
634 // it's in the noise.
635
636 // Emit 64 byte copies but make sure to keep at least four bytes reserved.
637 while (SNAPPY_PREDICT_FALSE(len >= 68)) {
638 op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
639 len -= 64;
640 }
641
642 // One or two copies will now finish the job.
643 if (len > 64) {
644 op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60);
645 len -= 60;
646 }
647
648 // Emit remainder.
649 if (len < 12) {
650 op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
651 } else {
652 op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
653 }
654 return op;
655 }
656}
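// For illustration, a match of length 130 is emitted as three copies by the
// code above: 130 >= 68 so a 64-byte copy goes out first (leaving 66), 66 > 64
// so a 60-byte copy follows (leaving 6), and the final 6 bytes form the last
// copy. Emitting 64 in the middle step could leave a remainder smaller than 4,
// which EmitCopyAtMost64 does not accept (it asserts len >= 4), hence the
// 68/60 thresholds.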
657
658bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
659 uint32_t v = 0;
660 const char* limit = start + n;
661 if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
662 *result = v;
663 return true;
664 } else {
665 return false;
666 }
667}
668
669namespace {
670uint32_t CalculateTableSize(uint32_t input_size) {
671 static_assert(
672 kMaxHashTableSize >= kMinHashTableSize,
673 "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
674 if (input_size > kMaxHashTableSize) {
675 return kMaxHashTableSize;
676 }
677 if (input_size < kMinHashTableSize) {
678 return kMinHashTableSize;
679 }
680 // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1.
681 // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)).
682 return 2u << Bits::Log2Floor(input_size - 1);
683}
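// For example, a 5000-byte fragment (assuming it lies between the minimum and
// maximum table sizes) gets a table of 2 << Bits::Log2Floor(4999) == 2 << 12
// == 8192 slots, the smallest power of two >= 5000.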
684} // namespace
685
686namespace internal {
687WorkingMemory::WorkingMemory(size_t input_size) {
688 const size_t max_fragment_size = std::min(input_size, kBlockSize);
689 const size_t table_size = CalculateTableSize(max_fragment_size);
690 size_ = table_size * sizeof(*table_) + max_fragment_size +
691 MaxCompressedLength(max_fragment_size);
692 mem_ = std::allocator<char>().allocate(size_);
693 table_ = reinterpret_cast<uint16_t*>(mem_);
694 input_ = mem_ + table_size * sizeof(*table_);
695 output_ = input_ + max_fragment_size;
696}
697
698WorkingMemory::~WorkingMemory() {
699 std::allocator<char>().deallocate(mem_, size_);
700}
701
702uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
703 int* table_size) const {
704 const size_t htsize = CalculateTableSize(fragment_size);
705 memset(table_, 0, htsize * sizeof(*table_));
706 *table_size = htsize;
707 return table_;
708}
709} // end namespace internal
710
711// Flat array compression that does not emit the "uncompressed length"
712// prefix. Compresses "input" string to the "*op" buffer.
713//
714// REQUIRES: "input" is at most "kBlockSize" bytes long.
715// REQUIRES: "op" points to an array of memory that is at least
716// "MaxCompressedLength(input.size())" in size.
717// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
718// REQUIRES: "table_size" is a power of two
719//
720// Returns an "end" pointer into "op" buffer.
721// "end - op" is the compressed size of "input".
722namespace internal {
723char* CompressFragment(const char* input, size_t input_size, char* op,
724 uint16_t* table, const int table_size) {
725 // "ip" is the input pointer, and "op" is the output pointer.
726 const char* ip = input;
727 assert(input_size <= kBlockSize);
728 assert((table_size & (table_size - 1)) == 0); // table must be power of two
729 const uint32_t mask = table_size - 1;
730 const char* ip_end = input + input_size;
731 const char* base_ip = ip;
732
733 const size_t kInputMarginBytes = 15;
734 if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
735 const char* ip_limit = input + input_size - kInputMarginBytes;
736
737 for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
738 // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
739 // [next_emit, ip_end) after the main loop.
740 const char* next_emit = ip++;
741 uint64_t data = LittleEndian::Load64(ip);
742 // The body of this loop calls EmitLiteral once and then EmitCopy one or
743 // more times. (The exception is that when we're close to exhausting
744 // the input we goto emit_remainder.)
745 //
746 // In the first iteration of this loop we're just starting, so
747 // there's nothing to copy, so calling EmitLiteral once is
748 // necessary. And we only start a new iteration when the
749 // current iteration has determined that a call to EmitLiteral will
750 // precede the next call to EmitCopy (if any).
751 //
752 // Step 1: Scan forward in the input looking for a 4-byte-long match.
753 // If we get close to exhausting the input then goto emit_remainder.
754 //
755 // Heuristic match skipping: If 32 bytes are scanned with no matches
756 // found, start looking only at every other byte. If 32 more bytes are
757 // scanned (or skipped), look at every third byte, etc.. When a match is
758 // found, immediately go back to looking at every byte. This is a small
759 // loss (~5% performance, ~0.1% density) for compressible data due to more
760 // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
761 // win since the compressor quickly "realizes" the data is incompressible
762 // and doesn't bother looking for matches everywhere.
763 //
764 // The "skip" variable keeps track of how many bytes there are since the
765 // last match; dividing it by 32 (ie. right-shifting by five) gives the
766 // number of bytes to move ahead for each iteration.
767 uint32_t skip = 32;
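      // For illustration of the schedule: with skip starting at 32, the first
      // 32 probes advance ip by skip >> 5 == 1 byte each; once skip reaches 64
      // the next 16 probes advance by 2 bytes each, then by 3, and so on, so
      // the scan accelerates gradually on incompressible data.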
768
769 const char* candidate;
770 if (ip_limit - ip >= 16) {
771 auto delta = ip - base_ip;
772 for (int j = 0; j < 4; ++j) {
773 for (int k = 0; k < 4; ++k) {
774 int i = 4 * j + k;
775 // These for-loops are meant to be unrolled. So we can freely
776 // special case the first iteration to use the value already
777 // loaded in preload.
778 uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
779 assert(dword == LittleEndian::Load32(ip + i));
780 uint32_t hash = HashBytes(dword, mask);
781 candidate = base_ip + table[hash];
782 assert(candidate >= base_ip);
783 assert(candidate < ip + i);
784 table[hash] = delta + i;
785 if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
786 *op = LITERAL | (i << 2);
787 UnalignedCopy128(next_emit, op + 1);
788 ip += i;
789 op = op + i + 2;
790 goto emit_match;
791 }
792 data >>= 8;
793 }
794 data = LittleEndian::Load64(ip + 4 * j + 4);
795 }
796 ip += 16;
797 skip += 16;
798 }
799 while (true) {
800 assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
801 uint32_t hash = HashBytes(data, mask);
802 uint32_t bytes_between_hash_lookups = skip >> 5;
803 skip += bytes_between_hash_lookups;
804 const char* next_ip = ip + bytes_between_hash_lookups;
805 if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
806 ip = next_emit;
807 goto emit_remainder;
808 }
809 candidate = base_ip + table[hash];
810 assert(candidate >= base_ip);
811 assert(candidate < ip);
812
813 table[hash] = ip - base_ip;
814 if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
815 LittleEndian::Load32(candidate))) {
816 break;
817 }
818 data = LittleEndian::Load32(next_ip);
819 ip = next_ip;
820 }
821
822 // Step 2: A 4-byte match has been found. We'll later see if more
823 // than 4 bytes match. But, prior to the match, input
824 // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
825 assert(next_emit + 16 <= ip_end);
826 op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
827
828 // Step 3: Call EmitCopy, and then see if another EmitCopy could
829 // be our next move. Repeat until we find no match for the
830 // input immediately after what was consumed by the last EmitCopy call.
831 //
832 // If we exit this loop normally then we need to call EmitLiteral next,
833 // though we don't yet know how big the literal will be. We handle that
834 // by proceeding to the next iteration of the main loop. We also can exit
835 // this loop via goto if we get close to exhausting the input.
836 emit_match:
837 do {
838 // We have a 4-byte match at ip, and no need to emit any
839 // "literal bytes" prior to ip.
840 const char* base = ip;
841 std::pair<size_t, bool> p =
842 FindMatchLength(candidate + 4, ip + 4, ip_end, &data);
843 size_t matched = 4 + p.first;
844 ip += matched;
845 size_t offset = base - candidate;
846 assert(0 == memcmp(base, candidate, matched));
847 if (p.second) {
848 op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
849 } else {
850 op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
851 }
852 if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
853 goto emit_remainder;
854 }
855 // Expect 5 bytes to match
856 assert((data & 0xFFFFFFFFFF) ==
857 (LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
858 // We are now looking for a 4-byte match again. We read
859 // table[Hash(ip, shift)] for that. To improve compression,
860 // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
861 table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1;
862 uint32_t hash = HashBytes(data, mask);
863 candidate = base_ip + table[hash];
864 table[hash] = ip - base_ip;
865 // Measurements on the benchmarks have shown the following probabilities
866 // for the loop to exit (ie. avg. number of iterations is reciprocal).
867 // BM_Flat/6 txt1 p = 0.3-0.4
868 // BM_Flat/7 txt2 p = 0.35
869 // BM_Flat/8 txt3 p = 0.3-0.4
870 // BM_Flat/9 txt4 p = 0.34-0.4
871 // BM_Flat/10 pb p = 0.4
872 // BM_Flat/11 gaviota p = 0.1
873 // BM_Flat/12 cp p = 0.5
874 // BM_Flat/13 c p = 0.3
875 } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
876 // Because the least significant 5 bytes matched, we can utilize data
877 // for the next iteration.
878 preload = data >> 8;
879 }
880 }
881
882emit_remainder:
883 // Emit the remaining bytes as a literal
884 if (ip < ip_end) {
885 op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
886 }
887
888 return op;
889}
890} // end namespace internal
891
892// Called back at every compression call to trace parameters and sizes.
893static inline void Report(const char *algorithm, size_t compressed_size,
894 size_t uncompressed_size) {
895 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
896 (void)algorithm;
897 (void)compressed_size;
898 (void)uncompressed_size;
899}
900
901// Signature of output types needed by decompression code.
902// The decompression code is templatized on a type that obeys this
903// signature so that we do not pay virtual function call overhead in
904// the middle of a tight decompression loop.
905//
906// class DecompressionWriter {
907// public:
908// // Called before decompression
909// void SetExpectedLength(size_t length);
910//
911// // For performance a writer may choose to donate the cursor variable to the
912// // decompression function. The decompression will inject it in all its
913// // function calls to the writer. Keeping the important output cursor as a
914// // function local stack variable allows the compiler to keep it in
915// // register, which greatly aids performance by avoiding loads and stores of
916// // this variable in the fast path loop iterations.
917// T GetOutputPtr() const;
918//
919// // At end of decompression the loop donates the ownership of the cursor
920// // variable back to the writer by calling this function.
921// void SetOutputPtr(T op);
922//
923// // Called after decompression
924// bool CheckLength() const;
925//
926// // Called repeatedly during decompression
927// // Each function gets a pointer to the op (output pointer) that the writer
928// // can use and update. Note it's important that these functions get fully
929// // inlined so that no actual address of the local variable needs to be
930// // taken.
931// bool Append(const char* ip, size_t length, T* op);
932// bool AppendFromSelf(uint32_t offset, size_t length, T* op);
933//
934// // The rules for how TryFastAppend differs from Append are somewhat
935// // convoluted:
936// //
937// // - TryFastAppend is allowed to decline (return false) at any
938// // time, for any reason -- just "return false" would be
939// // a perfectly legal implementation of TryFastAppend.
940// // The intention is for TryFastAppend to allow a fast path
941// // in the common case of a small append.
942// // - TryFastAppend is allowed to read up to <available> bytes
943// // from the input buffer, whereas Append is allowed to read
944// // <length>. However, if it returns true, it must leave
945// // at least five (kMaximumTagLength) bytes in the input buffer
946// // afterwards, so that there is always enough space to read the
947// // next tag without checking for a refill.
948// // - TryFastAppend must always decline (return false)
949// // if <length> is 61 or more, as in this case the literal length is not
950// // decoded fully. In practice, this should not be a big problem,
951// // as it is unlikely that one would implement a fast path accepting
952// // this much data.
953// //
954// bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
955// };
956
957static inline uint32_t ExtractLowBytes(uint32_t v, int n) {
958 assert(n >= 0);
959 assert(n <= 4);
960#if SNAPPY_HAVE_BMI2
961 return _bzhi_u32(v, 8 * n);
962#else
963 // This needs to be wider than uint32_t otherwise `mask << 32` will be
964 // undefined.
965 uint64_t mask = 0xffffffff;
966 return v & ~(mask << (8 * n));
967#endif
968}
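// For example, ExtractLowBytes(0xAABBCCDD, 2) == 0x0000CCDD: with BMI2 this is
// a single _bzhi_u32(v, 16); otherwise the 64-bit mask in the fallback avoids
// the undefined 32-bit shift when n == 4 and yields the same result.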
969
970static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
971 assert(shift < 32);
972 static const uint8_t masks[] = {
973 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
974 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
975 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
976 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
977 return (value & masks[shift]) != 0;
978}
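// For example, LeftShiftOverflows(0x1F, 28) is true: 0x1F needs 5 bits and
// 5 + 28 > 32, and masks[28] == 0xf0 keeps exactly the value bits that would be
// shifted past bit 31. LeftShiftOverflows(0x0F, 28) is false since
// 0x0F << 28 == 0xF0000000 still fits in 32 bits.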
979
980inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
981 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
982 (void)dst;
983 return offset != 0;
984}
985
986void MemCopy(char* dst, const uint8_t* src, size_t size) {
987 std::memcpy(dst, src, size);
988}
989
990void MemCopy(ptrdiff_t dst, const uint8_t* src, size_t size) {
991 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
992 (void)dst;
993 (void)src;
994 (void)size;
995}
996
997void MemMove(char* dst, const void* src, size_t size) {
998 std::memmove(dst, src, size);
999}
1000
1001void MemMove(ptrdiff_t dst, const void* src, size_t size) {
1002 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1003 (void)dst;
1004 (void)src;
1005 (void)size;
1006}
1007
1008SNAPPY_ATTRIBUTE_ALWAYS_INLINE
1009size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) {
1010 const uint8_t*& ip = *ip_p;
1011 // This section is crucial for the throughput of the decompression loop.
1012 // The latency of an iteration is fundamentally constrained by the
1013 // following data chain on ip.
1014 // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2
1015 // delta2 = ((c >> 2) + 1) ip++
1016 // This is different from X86 optimizations because ARM has conditional add
1017 // instruction (csinc) and it removes several register moves.
1018 const size_t tag_type = *tag & 3;
1019 const bool is_literal = (tag_type == 0);
1020 if (is_literal) {
1021 size_t next_literal_tag = (*tag >> 2) + 1;
1022 *tag = ip[next_literal_tag];
1023 ip += next_literal_tag + 1;
1024 } else {
1025 *tag = ip[tag_type];
1026 ip += tag_type + 1;
1027 }
1028 return tag_type;
1029}
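// For illustration of the tag-advance contract (shared with the x86 variant
// below), assume ip points just past the current tag. For a copy-1 tag
// (tag_type == 1) the next tag sits after the single offset byte, so
// *tag = ip[1] and ip advances by 2. For a short literal tag such as 0x10
// (literal length 5) the next tag follows the 5 literal bytes, so *tag = ip[5]
// and ip advances by 6. Long literals and copy-4 tags are detected by the
// caller, which restores ip and leaves the fast loop.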
1030
1031SNAPPY_ATTRIBUTE_ALWAYS_INLINE
1032size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
1033 const uint8_t*& ip = *ip_p;
1034 // This section is crucial for the throughput of the decompression loop.
1035 // The latency of an iteration is fundamentally constrained by the
1036 // following data chain on ip.
1037 // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2
1038 // ip2 = ip + 2 + (c >> 2)
1039 // This amounts to 8 cycles.
1040 // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov)
1041 size_t literal_len = *tag >> 2;
1042 size_t tag_type = *tag;
1043 bool is_literal;
1044#if defined(__GNUC__) && defined(__x86_64__) && defined(__GCC_ASM_FLAG_OUTPUTS__)
1045 // TODO clang misses the fact that the (c & 3) already correctly
1046 // sets the zero flag.
1047 asm("and $3, %k[tag_type]\n\t"
1048 : [tag_type] "+r"(tag_type), "=@ccz"(is_literal));
1049#else
1050 tag_type &= 3;
1051 is_literal = (tag_type == 0);
1052#endif
1053 // TODO
1054 // This code is subtle. Loading the values first and then cmov-ing has less
1055 // latency than cmov-ing ip and then loading. However, clang would move the loads
1056 // in an optimization phase; volatile prevents this transformation.
1057 // Note that we have enough slop bytes (64) that the loads are always valid.
1058 size_t tag_literal =
1059 static_cast<const volatile uint8_t*>(ip)[1 + literal_len];
1060 size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type];
1061 *tag = is_literal ? tag_literal : tag_copy;
1062 const uint8_t* ip_copy = ip + 1 + tag_type;
1063 const uint8_t* ip_literal = ip + 2 + literal_len;
1064 ip = is_literal ? ip_literal : ip_copy;
1065#if defined(__GNUC__) && defined(__x86_64__)
1066 // TODO Clang is "optimizing" away zero-extension (a totally free
1067 // operation); this means that after the cmov of tag, it emits another movzb
1068 // tag, byte(tag). It really matters as it's on the core chain. This dummy
1069 // asm persuades clang to do the zero-extension at the load (it's automatic),
1070 // removing the expensive movzb.
1071 asm("" ::"r"(tag_copy));
1072#endif
1073 return tag_type;
1074}
1075
1076// Extract the offset for copy-1 and copy-2 returns 0 for literals or copy-4.
1077inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
1078 // For x86 non-static storage works better. For ARM static storage is better.
1079 // TODO: Once the array is recognized as a register, improve the
1080 // readability for x86.
1081#if defined(__x86_64__)
1082 constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
1083 uint16_t result;
1084 memcpy(&result,
1085 reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type,
1086 sizeof(result));
1087 return val & result;
1088#elif defined(__aarch64__)
1089 constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
1090 return val & static_cast<uint32_t>(
1091 (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF);
1092#else
1093 static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0};
1094 return val & kExtractMasks[tag_type];
1095#endif
1096};
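// For example, with the four bytes following a tag loaded as val == 0x44332211:
// a copy-1 tag (tag_type == 1) keeps one offset byte, 0x11; a copy-2 tag
// (tag_type == 2) keeps two, 0x2211; literals and copy-4 (tag_type 0 and 3)
// yield 0. All three variants above encode the same masks {0, 0xFF, 0xFFFF, 0}.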
1097
1098// Core decompression loop, when there is enough data available.
1099// Decompresses the input buffer [ip, ip_limit) into the output buffer
1100 // [op, op_limit_min_slop). Returns when we are too close to the end
1101 // of the input buffer, when we exceed op_limit_min_slop, or when an exceptional
1102 // tag is encountered (a literal of length > 60 or a copy-4).
1103// Returns {ip, op} at the points it stopped decoding.
1104// TODO This function probably does not need to be inlined, as it
1105// should decode large chunks at a time. This allows runtime dispatch to
1106// implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy).
1107template <typename T>
1108std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
1109 const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
1110 ptrdiff_t op_limit_min_slop) {
1111 // We unroll the inner loop twice so we need twice the spare room.
1112 op_limit_min_slop -= kSlopBytes;
1113 if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
1114 const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1;
1115 ip++;
1116 // ip points just past the tag and we are touching at maximum kSlopBytes
1117 // in an iteration.
1118 size_t tag = ip[-1];
1119#if defined(__clang__) && defined(__aarch64__)
1120 // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317
1121 // when loading 1 byte, clang for aarch64 doesn't realize that it (ldrb)
1122 // comes with free zero-extension, so clang generates another
1123 // 'and xn, xm, 0xff' before it use that as the offset. This 'and' is
1124 // redundant and can be removed by adding this dummy asm, which gives
1125 // clang a hint that we're doing the zero-extension at the load.
1126 asm("" ::"r"(tag));
1127#endif
1128 do {
1129 // The throughput is limited by instructions; unrolling the inner loop
1130 // twice reduces the number of instructions checking limits and also
1131 // leads to fewer movs.
1132 for (int i = 0; i < 2; i++) {
1133 const uint8_t* old_ip = ip;
1134 assert(tag == ip[-1]);
1135 // For literals tag_type = 0, hence ExtractOffset will always return 0.
1136 // For literals the offset will thus be the spurious 256 from the table.
1137 ptrdiff_t len_min_offset = kLengthMinusOffset[tag];
1138#if defined(__aarch64__)
1139 size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag);
1140#else
1141 size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag);
1142#endif
1143 uint32_t next = LittleEndian::Load32(old_ip);
1144 size_t len = len_min_offset & 0xFF;
1145 len_min_offset -= ExtractOffset(next, tag_type);
1146 if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) {
1147 if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
1148 // Exceptional case (long literal or copy 4).
1149 // Actually doing the copy here is negatively impacting the main
1150 // loop due to compiler incorrectly allocating a register for
1151 // this fallback. Hence we just break.
1152 break_loop:
1153 ip = old_ip;
1154 goto exit;
1155 }
1156 // Only copy-1 or copy-2 tags can get here.
1157 assert(tag_type == 1 || tag_type == 2);
1158 std::ptrdiff_t delta = op + len_min_offset - len;
1159 // Guard against copies before the buffer start.
1160 if (SNAPPY_PREDICT_FALSE(delta < 0 ||
1161 !Copy64BytesWithPatternExtension(
1162 op_base + op, len - len_min_offset))) {
1163 goto break_loop;
1164 }
1165 op += len;
1166 continue;
1167 }
1168 std::ptrdiff_t delta = op + len_min_offset - len;
1169 if (SNAPPY_PREDICT_FALSE(delta < 0)) {
1170 // Due to the spurious 256 offset given to literals, this will trigger
1171 // at the start of a block when op is still smaller than 256.
1172 if (tag_type != 0) goto break_loop;
1173 MemCopy(op_base + op, old_ip, 64);
1174 op += len;
1175 continue;
1176 }
1177
1178 // For copies we need to copy from op_base + delta, for literals
1179 // we need to copy from ip instead of from the stream.
1180 const void* from =
1181 tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
1182 MemMove(op_base + op, from, 64);
1183 op += len;
1184 }
1185 } while (ip < ip_limit_min_slop && op < op_limit_min_slop);
1186 exit:
1187 ip--;
1188 assert(ip <= ip_limit);
1189 }
1190 return {ip, op};
1191}
1192
1193// Helper class for decompression
1194class SnappyDecompressor {
1195 private:
1196 Source* reader_; // Underlying source of bytes to decompress
1197 const char* ip_; // Points to next buffered byte
1198 const char* ip_limit_; // Points just past buffered bytes
1199 // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaximumTagLength from
1200 // the buffer.
1201 const char* ip_limit_min_maxtaglen_;
1202 uint32_t peeked_; // Bytes peeked from reader (need to skip)
1203 bool eof_; // Hit end of input without an error?
1204 char scratch_[kMaximumTagLength]; // See RefillTag().
1205
1206 // Ensure that all of the tag metadata for the next tag is available
1207 // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
1208 // if (ip_limit_ - ip_ < 5).
1209 //
1210 // Returns true on success, false on error or end of input.
1211 bool RefillTag();
1212
1213 void ResetLimit(const char* ip) {
1214 ip_limit_min_maxtaglen_ =
1215 ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1);
1216 }
1217
1218 public:
1219 explicit SnappyDecompressor(Source* reader)
1220 : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {}
1221
1222 ~SnappyDecompressor() {
1223 // Advance past any bytes we peeked at from the reader
1224 reader_->Skip(peeked_);
1225 }
1226
1227 // Returns true iff we have hit the end of the input without an error.
1228 bool eof() const { return eof_; }
1229
1230 // Read the uncompressed length stored at the start of the compressed data.
1231 // On success, stores the length in *result and returns true.
1232 // On failure, returns false.
1233 bool ReadUncompressedLength(uint32_t* result) {
1234 assert(ip_ == NULL); // Must not have read anything yet
1235 // Length is encoded in 1..5 bytes
1236 *result = 0;
1237 uint32_t shift = 0;
1238 while (true) {
1239 if (shift >= 32) return false;
1240 size_t n;
1241 const char* ip = reader_->Peek(&n);
1242 if (n == 0) return false;
1243 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
1244 reader_->Skip(1);
1245 uint32_t val = c & 0x7f;
1246 if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
1247 *result |= val << shift;
1248 if (c < 128) {
1249 break;
1250 }
1251 shift += 7;
1252 }
1253 return true;
1254 }
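  // For example, in the loop above the two bytes 0xE2 0x02 decode to
  // 98 + (2 << 7) == 354: the first byte contributes its low seven bits (0x62)
  // and has the continuation bit set, the second contributes 2 at shift 7 and,
  // being below 128, terminates the varint.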
1255
1256 // Process the next item found in the input.
1257 // Returns true if successful, false on error or end of input.
1258 template <class Writer>
1259#if defined(__GNUC__) && defined(__x86_64__)
1260 __attribute__((aligned(32)))
1261#endif
1262 void
1263 DecompressAllTags(Writer* writer) {
1264 const char* ip = ip_;
1265 ResetLimit(ip);
1266 auto op = writer->GetOutputPtr();
1267 // We could have put this refill fragment only at the beginning of the loop.
1268 // However, duplicating it at the end of each branch gives the compiler more
1269 // scope to optimize the <ip_limit_ - ip> expression based on the local
1270 // context, which overall increases speed.
1271#define MAYBE_REFILL() \
1272 if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \
1273 ip_ = ip; \
1274 if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \
1275 ip = ip_; \
1276 ResetLimit(ip); \
1277 } \
1278 preload = static_cast<uint8_t>(*ip)
1279
1280 // At the start of the for loop below the least significant byte of preload
1281 // contains the tag.
1282 uint32_t preload;
1283 MAYBE_REFILL();
1284 for (;;) {
1285 {
1286 ptrdiff_t op_limit_min_slop;
1287 auto op_base = writer->GetBase(&op_limit_min_slop);
1288 if (op_base) {
1289 auto res =
1290 DecompressBranchless(reinterpret_cast<const uint8_t*>(ip),
1291 reinterpret_cast<const uint8_t*>(ip_limit_),
1292 op - op_base, op_base, op_limit_min_slop);
1293 ip = reinterpret_cast<const char*>(res.first);
1294 op = op_base + res.second;
1295 MAYBE_REFILL();
1296 }
1297 }
1298 const uint8_t c = static_cast<uint8_t>(preload);
1299 ip++;
1300
1301 // Ratio of iterations that have LITERAL vs non-LITERAL for different
1302 // inputs.
1303 //
1304 // input LITERAL NON_LITERAL
1305 // -----------------------------------
1306 // html|html4|cp 23% 77%
1307 // urls 36% 64%
1308 // jpg 47% 53%
1309 // pdf 19% 81%
1310 // txt[1-4] 25% 75%
1311 // pb 24% 76%
1312 // bin 24% 76%
1313 if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
1314 size_t literal_length = (c >> 2) + 1u;
1315 if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) {
1316 assert(literal_length < 61);
1317 ip += literal_length;
1318 // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
1319 // will not return true unless there's already at least five spare
1320 // bytes in addition to the literal.
1321 preload = static_cast<uint8_t>(*ip);
1322 continue;
1323 }
1324 if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
1325 // Long literal.
1326 const size_t literal_length_length = literal_length - 60;
1327 literal_length =
1328 ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
1329 1;
1330 ip += literal_length_length;
1331 }
1332
1333 size_t avail = ip_limit_ - ip;
1334 while (avail < literal_length) {
1335 if (!writer->Append(ip, avail, &op)) goto exit;
1336 literal_length -= avail;
1337 reader_->Skip(peeked_);
1338 size_t n;
1339 ip = reader_->Peek(&n);
1340 avail = n;
1341 peeked_ = avail;
1342 if (avail == 0) goto exit;
1343 ip_limit_ = ip + avail;
1344 ResetLimit(ip);
1345 }
1346 if (!writer->Append(ip, literal_length, &op)) goto exit;
1347 ip += literal_length;
1348 MAYBE_REFILL();
1349 } else {
1350 if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) {
1351 const size_t copy_offset = LittleEndian::Load32(ip);
1352 const size_t length = (c >> 2) + 1;
1353 ip += 4;
1354
1355 if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
1356 } else {
1357 const ptrdiff_t entry = kLengthMinusOffset[c];
1358 preload = LittleEndian::Load32(ip);
1359 const uint32_t trailer = ExtractLowBytes(preload, c & 3);
1360 const uint32_t length = entry & 0xff;
1361 assert(length > 0);
1362
1363 // copy_offset/256 is encoded in bits 8..10. By just fetching
1364 // those bits, we get copy_offset (since the bit-field starts at
1365 // bit 8).
1366 const uint32_t copy_offset = trailer - entry + length;
1367 if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
1368
1369 ip += (c & 3);
1370 // By using the result of the previous load we reduce the critical
1371 // dependency chain of ip to 4 cycles.
1372 preload >>= (c & 3) * 8;
1373 if (ip < ip_limit_min_maxtaglen_) continue;
1374 }
1375 MAYBE_REFILL();
1376 }
1377 }
1378#undef MAYBE_REFILL
1379 exit:
1380 writer->SetOutputPtr(op);
1381 }
1382};
1383
1384constexpr uint32_t CalculateNeeded(uint8_t tag) {
1385 return ((tag & 3) == 0 && tag >= (60 * 4))
1386 ? (tag >> 2) - 58
1387 : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
1388}
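// For illustration: a short literal needs only its tag byte, copy-1 needs 2
// contiguous bytes (tag plus a 1-byte offset), copy-2 needs 3, copy-4 needs 5,
// and a literal tag with length field 60..63 needs (tag >> 2) - 58, i.e. 2..5
// bytes (tag plus 1..4 extra length bytes).
static_assert(CalculateNeeded(0xF0) == 2, "long literal with one length byte");
static_assert(CalculateNeeded(0x02) == 3, "copy-2 needs tag plus 2 offset bytes");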
1389
1390#if __cplusplus >= 201402L
1391constexpr bool VerifyCalculateNeeded() {
1392 for (int i = 0; i < 256; i++) {
1393 if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false;
1394 }
1395 return true;
1396}
1397
1398// Make sure CalculateNeeded is correct by verifying it against the established
1399// table encoding the number of added bytes needed.
1400static_assert(VerifyCalculateNeeded(), "");
1401#endif // c++14
1402
1403bool SnappyDecompressor::RefillTag() {
1404 const char* ip = ip_;
1405 if (ip == ip_limit_) {
1406 // Fetch a new fragment from the reader
1407 reader_->Skip(peeked_); // All peeked bytes are used up
1408 size_t n;
1409 ip = reader_->Peek(&n);
1410 peeked_ = n;
1411 eof_ = (n == 0);
1412 if (eof_) return false;
1413 ip_limit_ = ip + n;
1414 }
1415
1416 // Read the tag character
1417 assert(ip < ip_limit_);
1418 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
1419 // At this point make sure that the data for the next tag is consecutive.
1420 // For copy 1 this means the next 2 bytes (tag and 1 byte offset)
1421 // For copy 2 the next 3 bytes (tag and 2 byte offset)
1422 // For copy 4 the next 5 bytes (tag and 4 byte offset)
1423 // For all small literals we only need 1 byte, but for literals 60...63 the
1424 // length is encoded in 1...4 extra bytes.
1425 const uint32_t needed = CalculateNeeded(c);
1426 assert(needed <= sizeof(scratch_));
1427
1428 // Read more bytes from reader if needed
1429 uint32_t nbuf = ip_limit_ - ip;
1430 if (nbuf < needed) {
1431 // Stitch together bytes from ip and reader to form the word
1432 // contents. We store the needed bytes in "scratch_". They
1433 // will be consumed immediately by the caller since we do not
1434 // read more than we need.
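    // For example, if only the tag byte of a copy-2 element is left in the
    // current fragment, the remaining two offset bytes are pulled from the
    // following fragment(s) so the decode loop always sees "needed"
    // contiguous bytes (illustrative example, not an exhaustive case list).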
1435 std::memmove(scratch_, ip, nbuf);
1436 reader_->Skip(peeked_); // All peeked bytes are used up
1437 peeked_ = 0;
1438 while (nbuf < needed) {
1439 size_t length;
1440 const char* src = reader_->Peek(&length);
1441 if (length == 0) return false;
1442 uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
1443 std::memcpy(scratch_ + nbuf, src, to_add);
1444 nbuf += to_add;
1445 reader_->Skip(to_add);
1446 }
1447 assert(nbuf == needed);
1448 ip_ = scratch_;
1449 ip_limit_ = scratch_ + needed;
1450 } else if (nbuf < kMaximumTagLength) {
1451 // Have enough bytes, but move into scratch_ so that we do not
1452 // read past end of input
1453 std::memmove(scratch_, ip, nbuf);
1454 reader_->Skip(peeked_); // All peeked bytes are used up
1455 peeked_ = 0;
1456 ip_ = scratch_;
1457 ip_limit_ = scratch_ + nbuf;
1458 } else {
1459 // Pass pointer to buffer returned by reader_.
1460 ip_ = ip;
1461 }
1462 return true;
1463}
1464
1465template <typename Writer>
1466static bool InternalUncompress(Source* r, Writer* writer) {
1467 // Read the uncompressed length from the front of the compressed input
1468 SnappyDecompressor decompressor(r);
1469 uint32_t uncompressed_len = 0;
1470 if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
1471
1472 return InternalUncompressAllTags(&decompressor, writer, r->Available(),
1473 uncompressed_len);
1474}
1475
1476template <typename Writer>
1477static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
1478 Writer* writer, uint32_t compressed_len,
1479 uint32_t uncompressed_len) {
1480 Report("snappy_uncompress", compressed_len, uncompressed_len);
1481
1482 writer->SetExpectedLength(uncompressed_len);
1483
1484 // Process the entire input
1485 decompressor->DecompressAllTags(writer);
1486 writer->Flush();
1487 return (decompressor->eof() && writer->CheckLength());
1488}
1489
1490bool GetUncompressedLength(Source* source, uint32_t* result) {
1491 SnappyDecompressor decompressor(source);
1492 return decompressor.ReadUncompressedLength(result);
1493}
1494
1495size_t Compress(Source* reader, Sink* writer) {
1496 size_t written = 0;
1497 size_t N = reader->Available();
1498 const size_t uncompressed_size = N;
1499 char ulength[Varint::kMax32];
1500 char* p = Varint::Encode32(ulength, N);
1501 writer->Append(ulength, p - ulength);
1502 written += (p - ulength);
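  // The preamble written above is the uncompressed length as a
  // little-endian varint: 7 bits per byte, high bit set on every byte but
  // the last. For example, N = 65536 would be emitted as the three bytes
  // 0x80 0x80 0x04 (hand-worked value, shown only for illustration).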
1503
1504 internal::WorkingMemory wmem(N);
1505
1506 while (N > 0) {
1507 // Get next block to compress (without copying if possible)
1508 size_t fragment_size;
1509 const char* fragment = reader->Peek(&fragment_size);
1510 assert(fragment_size != 0); // premature end of input
1511 const size_t num_to_read = std::min(N, kBlockSize);
1512 size_t bytes_read = fragment_size;
1513
1514 size_t pending_advance = 0;
1515 if (bytes_read >= num_to_read) {
1516 // Buffer returned by reader is large enough
1517 pending_advance = num_to_read;
1518 fragment_size = num_to_read;
1519 } else {
1520 char* scratch = wmem.GetScratchInput();
1521 std::memcpy(scratch, fragment, bytes_read);
1522 reader->Skip(bytes_read);
1523
1524 while (bytes_read < num_to_read) {
1525 fragment = reader->Peek(&fragment_size);
1526 size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
1527 std::memcpy(scratch + bytes_read, fragment, n);
1528 bytes_read += n;
1529 reader->Skip(n);
1530 }
1531 assert(bytes_read == num_to_read);
1532 fragment = scratch;
1533 fragment_size = num_to_read;
1534 }
1535 assert(fragment_size == num_to_read);
1536
1537 // Get encoding table for compression
1538 int table_size;
1539 uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
1540
1541 // Compress input_fragment and append to dest
1542 const int max_output = MaxCompressedLength(num_to_read);
1543
1544 // Need a scratch buffer for the output, in case the byte sink doesn't
1545 // have room for us directly.
1546
1547 // Since we encode kBlockSize regions followed by a region
1548 // which is <= kBlockSize in length, a previously allocated
1549 // scratch_output[] region is big enough for this iteration.
1550 char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
1551 char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
1552 table_size);
1553 writer->Append(dest, end - dest);
1554 written += (end - dest);
1555
1556 N -= num_to_read;
1557 reader->Skip(pending_advance);
1558 }
1559
1560 Report("snappy_compress", written, uncompressed_size);
1561
1562 return written;
1563}
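
// A minimal usage sketch for the Source/Sink form of Compress() above
// (illustrative only; error handling elided). It mirrors what RawCompress()
// further below does, using the ByteArraySource/UncheckedByteArraySink
// helpers from snappy-sinksource.h:
//
//   std::string input = "example input";
//   std::string output(MaxCompressedLength(input.size()), '\0');
//   ByteArraySource source(input.data(), input.size());
//   UncheckedByteArraySink sink(&output[0]);
//   output.resize(Compress(&source, &sink));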
1564
1565// -----------------------------------------------------------------------
1566// IOVec interfaces
1567// -----------------------------------------------------------------------
1568
1569// A type that writes to an iovec.
1570// Note that this is not a "ByteSink", but a type that matches the
1571// Writer template argument to SnappyDecompressor::DecompressAllTags().
1572class SnappyIOVecWriter {
1573 private:
1574 // output_iov_end_ is set to iov + count and used to determine when
1575 // the end of the iovs is reached.
1576 const struct iovec* output_iov_end_;
1577
1578#if !defined(NDEBUG)
1579 const struct iovec* output_iov_;
1580#endif // !defined(NDEBUG)
1581
1582 // Current iov that is being written into.
1583 const struct iovec* curr_iov_;
1584
1585 // Pointer to current iov's write location.
1586 char* curr_iov_output_;
1587
1588 // Remaining bytes to write into curr_iov_output_.
1589 size_t curr_iov_remaining_;
1590
1591 // Total bytes decompressed into output_iov_ so far.
1592 size_t total_written_;
1593
1594 // Maximum number of bytes that will be decompressed into output_iov_.
1595 size_t output_limit_;
1596
1597 static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) {
1598 return reinterpret_cast<char*>(iov->iov_base) + offset;
1599 }
1600
1601 public:
1602 // Does not take ownership of iov. iov must be valid during the
1603 // entire lifetime of the SnappyIOVecWriter.
1604 inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
1605 : output_iov_end_(iov + iov_count),
1606#if !defined(NDEBUG)
1607 output_iov_(iov),
1608#endif // !defined(NDEBUG)
1609 curr_iov_(iov),
1610 curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
1611 : nullptr),
1612 curr_iov_remaining_(iov_count ? iov->iov_len : 0),
1613 total_written_(0),
1614 output_limit_(-1) {
1615 }
1616
1617 inline void SetExpectedLength(size_t len) { output_limit_ = len; }
1618
1619 inline bool CheckLength() const { return total_written_ == output_limit_; }
1620
1621 inline bool Append(const char* ip, size_t len, char**) {
1622 if (total_written_ + len > output_limit_) {
1623 return false;
1624 }
1625
1626 return AppendNoCheck(ip, len);
1627 }
1628
1629 char* GetOutputPtr() { return nullptr; }
1630 char* GetBase(ptrdiff_t*) { return nullptr; }
1631 void SetOutputPtr(char* op) {
1632 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1633 (void)op;
1634 }
1635
1636 inline bool AppendNoCheck(const char* ip, size_t len) {
1637 while (len > 0) {
1638 if (curr_iov_remaining_ == 0) {
1639 // This iovec is full. Go to the next one.
1640 if (curr_iov_ + 1 >= output_iov_end_) {
1641 return false;
1642 }
1643 ++curr_iov_;
1644 curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1645 curr_iov_remaining_ = curr_iov_->iov_len;
1646 }
1647
1648 const size_t to_write = std::min(len, curr_iov_remaining_);
1649 std::memcpy(curr_iov_output_, ip, to_write);
1650 curr_iov_output_ += to_write;
1651 curr_iov_remaining_ -= to_write;
1652 total_written_ += to_write;
1653 ip += to_write;
1654 len -= to_write;
1655 }
1656
1657 return true;
1658 }
1659
1660 inline bool TryFastAppend(const char* ip, size_t available, size_t len,
1661 char**) {
1662 const size_t space_left = output_limit_ - total_written_;
1663 if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
1664 curr_iov_remaining_ >= 16) {
1665 // Fast path, used for the majority (about 95%) of invocations.
1666 UnalignedCopy128(ip, curr_iov_output_);
1667 curr_iov_output_ += len;
1668 curr_iov_remaining_ -= len;
1669 total_written_ += len;
1670 return true;
1671 }
1672
1673 return false;
1674 }
1675
1676 inline bool AppendFromSelf(size_t offset, size_t len, char**) {
1677 // See SnappyArrayWriter::AppendFromSelf for an explanation of
1678 // the "offset - 1u" trick.
1679 if (offset - 1u >= total_written_) {
1680 return false;
1681 }
1682 const size_t space_left = output_limit_ - total_written_;
1683 if (len > space_left) {
1684 return false;
1685 }
1686
1687 // Locate the iovec from which we need to start the copy.
1688 const iovec* from_iov = curr_iov_;
1689 size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_;
1690 while (offset > 0) {
1691 if (from_iov_offset >= offset) {
1692 from_iov_offset -= offset;
1693 break;
1694 }
1695
1696 offset -= from_iov_offset;
1697 --from_iov;
1698#if !defined(NDEBUG)
1699 assert(from_iov >= output_iov_);
1700#endif // !defined(NDEBUG)
1701 from_iov_offset = from_iov->iov_len;
1702 }
1703
1704 // Copy <len> bytes starting from the iovec pointed to by from_iov to
1705 // the current iovec.
1706 while (len > 0) {
1707 assert(from_iov <= curr_iov_);
1708 if (from_iov != curr_iov_) {
1709 const size_t to_copy =
1710 std::min(from_iov->iov_len - from_iov_offset, len);
1711 AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy);
1712 len -= to_copy;
1713 if (len > 0) {
1714 ++from_iov;
1715 from_iov_offset = 0;
1716 }
1717 } else {
1718 size_t to_copy = curr_iov_remaining_;
1719 if (to_copy == 0) {
1720 // This iovec is full. Go to the next one.
1721 if (curr_iov_ + 1 >= output_iov_end_) {
1722 return false;
1723 }
1724 ++curr_iov_;
1725 curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1726 curr_iov_remaining_ = curr_iov_->iov_len;
1727 continue;
1728 }
1729 if (to_copy > len) {
1730 to_copy = len;
1731 }
1732 assert(to_copy > 0);
1733
1734 IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
1735 curr_iov_output_, curr_iov_output_ + to_copy,
1736 curr_iov_output_ + curr_iov_remaining_);
1737 curr_iov_output_ += to_copy;
1738 curr_iov_remaining_ -= to_copy;
1739 from_iov_offset += to_copy;
1740 total_written_ += to_copy;
1741 len -= to_copy;
1742 }
1743 }
1744
1745 return true;
1746 }
1747
1748 inline void Flush() {}
1749};
1750
1751bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
1752 const struct iovec* iov, size_t iov_cnt) {
1753 ByteArraySource reader(compressed, compressed_length);
1754 return RawUncompressToIOVec(&reader, iov, iov_cnt);
1755}
1756
1757bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
1758 size_t iov_cnt) {
1759 SnappyIOVecWriter output(iov, iov_cnt);
1760 return InternalUncompress(compressed, &output);
1761}
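
// A minimal usage sketch (illustrative only; "compressed"/"compressed_length"
// stand for the caller's compressed buffer): scatter the output across two
// caller-owned buffers. The iovecs must provide at least as many bytes as the
// stored uncompressed length for CheckLength() to succeed:
//
//   char head[64];
//   char tail[1 << 16];
//   struct iovec iov[2] = {{head, sizeof(head)}, {tail, sizeof(tail)}};
//   bool ok = RawUncompressToIOVec(compressed, compressed_length, iov, 2);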
1762
1763// -----------------------------------------------------------------------
1764// Flat array interfaces
1765// -----------------------------------------------------------------------
1766
1767// A type that writes to a flat array.
1768// Note that this is not a "ByteSink", but a type that matches the
1769// Writer template argument to SnappyDecompressor::DecompressAllTags().
1770class SnappyArrayWriter {
1771 private:
1772 char* base_;
1773 char* op_;
1774 char* op_limit_;
1775 // If op < op_limit_min_slop_ then it's safe to unconditionally write
1776 // kSlopBytes starting at op.
1777 char* op_limit_min_slop_;
1778
1779 public:
1780 inline explicit SnappyArrayWriter(char* dst)
1781 : base_(dst),
1782 op_(dst),
1783 op_limit_(dst),
1784 op_limit_min_slop_(dst) {} // Safe default see invariant.
1785
1786 inline void SetExpectedLength(size_t len) {
1787 op_limit_ = op_ + len;
1788 // Prevent pointer from being past the buffer.
1789 op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len);
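    // With this choice, any op strictly below op_limit_min_slop_ satisfies
    // op + kSlopBytes <= op_limit_, so a full kSlopBytes-byte store cannot
    // run past the buffer; the std::min keeps the pointer from being moved
    // before the start of the output for very short expected lengths.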
1790 }
1791
1792 inline bool CheckLength() const { return op_ == op_limit_; }
1793
1794 char* GetOutputPtr() { return op_; }
1795 char* GetBase(ptrdiff_t* op_limit_min_slop) {
1796 *op_limit_min_slop = op_limit_min_slop_ - base_;
1797 return base_;
1798 }
1799 void SetOutputPtr(char* op) { op_ = op; }
1800
1801 inline bool Append(const char* ip, size_t len, char** op_p) {
1802 char* op = *op_p;
1803 const size_t space_left = op_limit_ - op;
1804 if (space_left < len) return false;
1805 std::memcpy(op, ip, len);
1806 *op_p = op + len;
1807 return true;
1808 }
1809
1810 inline bool TryFastAppend(const char* ip, size_t available, size_t len,
1811 char** op_p) {
1812 char* op = *op_p;
1813 const size_t space_left = op_limit_ - op;
1814 if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
1815 // Fast path, used for the majority (about 95%) of invocations.
1816 UnalignedCopy128(ip, op);
1817 *op_p = op + len;
1818 return true;
1819 } else {
1820 return false;
1821 }
1822 }
1823
1824 SNAPPY_ATTRIBUTE_ALWAYS_INLINE
1825 inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
1826 assert(len > 0);
1827 char* const op = *op_p;
1828 assert(op >= base_);
1829 char* const op_end = op + len;
1830
1831 // Check if we try to append from before the start of the buffer.
1832 if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
1833 return false;
1834
1835 if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
1836 op >= op_limit_min_slop_ || offset < len)) {
1837 if (op_end > op_limit_ || offset == 0) return false;
1838 *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_);
1839 return true;
1840 }
1841 std::memmove(op, op - offset, kSlopBytes);
1842 *op_p = op_end;
1843 return true;
1844 }
1845 inline size_t Produced() const {
1846 assert(op_ >= base_);
1847 return op_ - base_;
1848 }
1849 inline void Flush() {}
1850};
1851
1852bool RawUncompress(const char* compressed, size_t compressed_length,
1853 char* uncompressed) {
1854 ByteArraySource reader(compressed, compressed_length);
1855 return RawUncompress(&reader, uncompressed);
1856}
1857
1858bool RawUncompress(Source* compressed, char* uncompressed) {
1859 SnappyArrayWriter output(uncompressed);
1860 return InternalUncompress(compressed, &output);
1861}
1862
1863bool Uncompress(const char* compressed, size_t compressed_length,
1864 std::string* uncompressed) {
1865 size_t ulength;
1866 if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
1867 return false;
1868 }
1869 // On 32-bit builds: max_size() < kuint32max. Check for that instead
1870 // of crashing (e.g., consider externally specified compressed data).
1871 if (ulength > uncompressed->max_size()) {
1872 return false;
1873 }
1874 STLStringResizeUninitialized(uncompressed, ulength);
1875 return RawUncompress(compressed, compressed_length,
1876 string_as_array(uncompressed));
1877}
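
// A minimal usage sketch (illustrative only; "data"/"size" stand for the
// caller's compressed buffer), showing the return value being checked for
// untrusted input:
//
//   std::string text;
//   if (!Uncompress(data, size, &text)) {
//     // not a valid Snappy-compressed buffer (or too large for this build)
//   }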
1878
1879// A Writer that drops everything on the floor and just does validation
1880class SnappyDecompressionValidator {
1881 private:
1882 size_t expected_;
1883 size_t produced_;
1884
1885 public:
1886 inline SnappyDecompressionValidator() : expected_(0), produced_(0) {}
1887 inline void SetExpectedLength(size_t len) { expected_ = len; }
1888 size_t GetOutputPtr() { return produced_; }
1889 size_t GetBase(ptrdiff_t* op_limit_min_slop) {
1890 *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1;
1891 return 1;
1892 }
1893 void SetOutputPtr(size_t op) { produced_ = op; }
1894 inline bool CheckLength() const { return expected_ == produced_; }
1895 inline bool Append(const char* ip, size_t len, size_t* produced) {
1896 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1897 (void)ip;
1898
1899 *produced += len;
1900 return *produced <= expected_;
1901 }
1902 inline bool TryFastAppend(const char* ip, size_t available, size_t length,
1903 size_t* produced) {
1904 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1905 (void)ip;
1906 (void)available;
1907 (void)length;
1908 (void)produced;
1909
1910 return false;
1911 }
1912 inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
1913 // See SnappyArrayWriter::AppendFromSelf for an explanation of
1914 // the "offset - 1u" trick.
1915 if (*produced <= offset - 1u) return false;
1916 *produced += len;
1917 return *produced <= expected_;
1918 }
1919 inline void Flush() {}
1920};
1921
1922bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
1923 ByteArraySource reader(compressed, compressed_length);
1924 SnappyDecompressionValidator writer;
1925 return InternalUncompress(&reader, &writer);
1926}
1927
1928bool IsValidCompressed(Source* compressed) {
1929 SnappyDecompressionValidator writer;
1930 return InternalUncompress(compressed, &writer);
1931}
1932
1933void RawCompress(const char* input, size_t input_length, char* compressed,
1934 size_t* compressed_length) {
1935 ByteArraySource reader(input, input_length);
1936 UncheckedByteArraySink writer(compressed);
1937 Compress(&reader, &writer);
1938
1939 // Compute how many bytes were added
1940 *compressed_length = (writer.CurrentDestination() - compressed);
1941}
1942
1943size_t Compress(const char* input, size_t input_length,
1944 std::string* compressed) {
1945 // Pre-grow the buffer to the max length of the compressed output
1946 STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
1947
1948 size_t compressed_length;
1949 RawCompress(input, input_length, string_as_array(compressed),
1950 &compressed_length);
1951 compressed->resize(compressed_length);
1952 return compressed_length;
1953}
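
// A minimal round-trip sketch for the std::string convenience API above
// (illustrative only; asserts stand in for real error handling):
//
//   std::string original = "round trip me, round trip me";
//   std::string compressed, restored;
//   Compress(original.data(), original.size(), &compressed);
//   assert(IsValidCompressedBuffer(compressed.data(), compressed.size()));
//   bool ok = Uncompress(compressed.data(), compressed.size(), &restored);
//   assert(ok && restored == original);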
1954
1955// -----------------------------------------------------------------------
1956// Sink interface
1957// -----------------------------------------------------------------------
1958
1959// A type that decompresses into a Sink. The template parameter
1960// Allocator must export one method "char* Allocate(int size);", which
1961// allocates a buffer of "size" and appends that to the destination.
1962template <typename Allocator>
1963class SnappyScatteredWriter {
1964 Allocator allocator_;
1965
1966 // We need random access into the data generated so far. Therefore
1967 // we keep track of all of the generated data as an array of blocks.
1968 // All of the blocks except the last have length kBlockSize.
1969 std::vector<char*> blocks_;
1970 size_t expected_;
1971
1972 // Total size of all fully generated blocks so far
1973 size_t full_size_;
1974
1975 // Pointer into current output block
1976 char* op_base_; // Base of output block
1977 char* op_ptr_; // Pointer to next unfilled byte in block
1978 char* op_limit_; // Pointer just past block
1979 // If op < op_limit_min_slop_ then it's safe to unconditionally write
1980 // kSlopBytes starting at op.
1981 char* op_limit_min_slop_;
1982
1983 inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); }
1984
1985 bool SlowAppend(const char* ip, size_t len);
1986 bool SlowAppendFromSelf(size_t offset, size_t len);
1987
1988 public:
1989 inline explicit SnappyScatteredWriter(const Allocator& allocator)
1990 : allocator_(allocator),
1991 full_size_(0),
1992 op_base_(NULL),
1993 op_ptr_(NULL),
1994 op_limit_(NULL),
1995 op_limit_min_slop_(NULL) {}
1996 char* GetOutputPtr() { return op_ptr_; }
1997 char* GetBase(ptrdiff_t* op_limit_min_slop) {
1998 *op_limit_min_slop = op_limit_min_slop_ - op_base_;
1999 return op_base_;
2000 }
2001 void SetOutputPtr(char* op) { op_ptr_ = op; }
2002
2003 inline void SetExpectedLength(size_t len) {
2004 assert(blocks_.empty());
2005 expected_ = len;
2006 }
2007
2008 inline bool CheckLength() const { return Size() == expected_; }
2009
2010 // Return the number of bytes actually uncompressed so far
2011 inline size_t Produced() const { return Size(); }
2012
2013 inline bool Append(const char* ip, size_t len, char** op_p) {
2014 char* op = *op_p;
2015 size_t avail = op_limit_ - op;
2016 if (len <= avail) {
2017 // Fast path
2018 std::memcpy(op, ip, len);
2019 *op_p = op + len;
2020 return true;
2021 } else {
2022 op_ptr_ = op;
2023 bool res = SlowAppend(ip, len);
2024 *op_p = op_ptr_;
2025 return res;
2026 }
2027 }
2028
2029 inline bool TryFastAppend(const char* ip, size_t available, size_t length,
2030 char** op_p) {
2031 char* op = *op_p;
2032 const int space_left = op_limit_ - op;
2033 if (length <= 16 && available >= 16 + kMaximumTagLength &&
2034 space_left >= 16) {
2035 // Fast path, used for the majority (about 95%) of invocations.
2036 UnalignedCopy128(ip, op);
2037 *op_p = op + length;
2038 return true;
2039 } else {
2040 return false;
2041 }
2042 }
2043
2044 inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
2045 char* op = *op_p;
2046 assert(op >= op_base_);
2047 // Check if we try to append from before the start of the buffer.
2048 if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
2049 static_cast<size_t>(op - op_base_) < offset ||
2050 op >= op_limit_min_slop_ || offset < len)) {
2051 if (offset == 0) return false;
2052 if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
2053 op + len > op_limit_)) {
2054 op_ptr_ = op;
2055 bool res = SlowAppendFromSelf(offset, len);
2056 *op_p = op_ptr_;
2057 return res;
2058 }
2059 *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_);
2060 return true;
2061 }
2062 // Fast path
2063 char* const op_end = op + len;
2064 std::memmove(op, op - offset, kSlopBytes);
2065 *op_p = op_end;
2066 return true;
2067 }
2068
2069 // Called at the end of decompression. We ask the allocator to
2070 // write all blocks to the sink.
2071 inline void Flush() { allocator_.Flush(Produced()); }
2072};
2073
2074template <typename Allocator>
2075bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
2076 size_t avail = op_limit_ - op_ptr_;
2077 while (len > avail) {
2078 // Completely fill this block
2079 std::memcpy(op_ptr_, ip, avail);
2080 op_ptr_ += avail;
2081 assert(op_limit_ - op_ptr_ == 0);
2082 full_size_ += (op_ptr_ - op_base_);
2083 len -= avail;
2084 ip += avail;
2085
2086 // Bounds check
2087 if (full_size_ + len > expected_) return false;
2088
2089 // Make new block
2090 size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
2091 op_base_ = allocator_.Allocate(bsize);
2092 op_ptr_ = op_base_;
2093 op_limit_ = op_base_ + bsize;
2094 op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize);
2095
2096 blocks_.push_back(op_base_);
2097 avail = bsize;
2098 }
2099
2100 std::memcpy(op_ptr_, ip, len);
2101 op_ptr_ += len;
2102 return true;
2103}
2104
2105template <typename Allocator>
2106bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
2107 size_t len) {
2108 // Overflow check
2109 // See SnappyArrayWriter::AppendFromSelf for an explanation of
2110 // the "offset - 1u" trick.
2111 const size_t cur = Size();
2112 if (offset - 1u >= cur) return false;
2113 if (expected_ - cur < len) return false;
2114
2115 // Currently we shouldn't ever hit this path because Compress() chops the
2116 // input into blocks and does not create cross-block copies. However, it is
2117 // nice if we do not rely on that, since we can get better compression if we
2118 // allow cross-block copies and thus might want to change the compressor in
2119 // the future.
2120 // TODO: Replace this with a properly optimized path. This is not
2121 // triggered right now, but it is so slow that it would regress
2122 // performance unacceptably if triggered.
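  // The absolute source position "src" below is split into a block index and
  // an offset within that block; e.g. with the usual 64 KiB blocks
  // (kBlockLog == 16), src == 70000 reads blocks_[1][4464]
  // (hand-worked example for illustration).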
2123 size_t src = cur - offset;
2124 char* op = op_ptr_;
2125 while (len-- > 0) {
2126 char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)];
2127 if (!Append(&c, 1, &op)) {
2128 op_ptr_ = op;
2129 return false;
2130 }
2131 src++;
2132 }
2133 op_ptr_ = op;
2134 return true;
2135}
2136
2137class SnappySinkAllocator {
2138 public:
2139 explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
2140 ~SnappySinkAllocator() {}
2141
2142 char* Allocate(int size) {
2143 Datablock block(new char[size], size);
2144 blocks_.push_back(block);
2145 return block.data;
2146 }
2147
2148 // We flush only at the end, because the writer wants
2149 // random access to the blocks and once we hand the
2150 // block over to the sink, we can't access it anymore.
2151 // Also we don't write more than what has actually been written
2152 // to the blocks.
2153 void Flush(size_t size) {
2154 size_t size_written = 0;
2155 for (Datablock& block : blocks_) {
2156 size_t block_size = std::min<size_t>(block.size, size - size_written);
2157 dest_->AppendAndTakeOwnership(block.data, block_size,
2158 &SnappySinkAllocator::Deleter, NULL);
2159 size_written += block_size;
2160 }
2161 blocks_.clear();
2162 }
2163
2164 private:
2165 struct Datablock {
2166 char* data;
2167 size_t size;
2168 Datablock(char* p, size_t s) : data(p), size(s) {}
2169 };
2170
2171 static void Deleter(void* arg, const char* bytes, size_t size) {
2172 // TODO: Switch to [[maybe_unused]] when we can assume C++17.
2173 (void)arg;
2174 (void)size;
2175
2176 delete[] bytes;
2177 }
2178
2179 Sink* dest_;
2180 std::vector<Datablock> blocks_;
2181
2182 // Note: copying this object is allowed
2183};
2184
2185size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
2186 SnappySinkAllocator allocator(uncompressed);
2187 SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
2188 InternalUncompress(compressed, &writer);
2189 return writer.Produced();
2190}
2191
2192bool Uncompress(Source* compressed, Sink* uncompressed) {
2193 // Read the uncompressed length from the front of the compressed input
2194 SnappyDecompressor decompressor(compressed);
2195 uint32_t uncompressed_len = 0;
2196 if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
2197 return false;
2198 }
2199
2200 char c;
2201 size_t allocated_size;
2202 char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1,
2203 &allocated_size);
2204
2205 const size_t compressed_len = compressed->Available();
2206 // If we can get a flat buffer, then use it, otherwise do block by block
2207 // uncompression
2208 if (allocated_size >= uncompressed_len) {
2209 SnappyArrayWriter writer(buf);
2210 bool result = InternalUncompressAllTags(&decompressor, &writer,
2211 compressed_len, uncompressed_len);
2212 uncompressed->Append(buf, writer.Produced());
2213 return result;
2214 } else {
2215 SnappySinkAllocator allocator(uncompressed);
2216 SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
2217 return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
2218 uncompressed_len);
2219 }
2220}
2221
2222} // namespace snappy