svn commit: r462635 - in head/www/waterfox: . files
Jan Beich
jbeich@FreeBSD.org
Thu Feb 22 19:51:18 UTC 2018
Author: jbeich
Date: Thu Feb 22 19:51:16 2018
New Revision: 462635
URL: https://svnweb.freebsd.org/changeset/ports/462635
Log:
www/waterfox: update to 56.0.4.20
- Apply some FF59 fixes
Changes: https://github.com/MrAlex94/Waterfox/compare/56.0.4...e03e284b083d
Security: HPKP/HSTS
Added:
head/www/waterfox/files/patch-bug1398021 (contents, props changed)
head/www/waterfox/files/patch-bug1399412 (contents, props changed)
head/www/waterfox/files/patch-bug1430557 (contents, props changed)
head/www/waterfox/files/patch-bug1437087 (contents, props changed)
Deleted:
head/www/waterfox/files/patch-bug895096
Modified:
head/www/waterfox/Makefile (contents, props changed)
head/www/waterfox/distinfo (contents, props changed)
head/www/waterfox/files/patch-bug1388020 (contents, props changed)
Modified: head/www/waterfox/Makefile
==============================================================================
--- head/www/waterfox/Makefile Thu Feb 22 19:51:15 2018 (r462634)
+++ head/www/waterfox/Makefile Thu Feb 22 19:51:16 2018 (r462635)
@@ -1,8 +1,8 @@
# $FreeBSD$
PORTNAME= waterfox
-DISTVERSION= 56.0.4
-PORTREVISION= 6
+DISTVERSION= 56.0.4-20
+DISTVERSIONSUFFIX= -ge03e284b083d
CATEGORIES= www ipv6
MAINTAINER= jbeich@FreeBSD.org
Modified: head/www/waterfox/distinfo
==============================================================================
--- head/www/waterfox/distinfo Thu Feb 22 19:51:15 2018 (r462634)
+++ head/www/waterfox/distinfo Thu Feb 22 19:51:16 2018 (r462635)
@@ -1,3 +1,3 @@
-TIMESTAMP = 1517598190
-SHA256 (MrAlex94-Waterfox-56.0.4_GH0.tar.gz) = 291a7aa8e541802d1705cf68c694e300f9cb14fffc6c1d24e51b9ed486cd44b7
-SIZE (MrAlex94-Waterfox-56.0.4_GH0.tar.gz) = 394233214
+TIMESTAMP = 1519322366
+SHA256 (MrAlex94-Waterfox-56.0.4-20-ge03e284b083d_GH0.tar.gz) = f8103fee10acf9e32fc8d9ea8fca6418a557888a2bda781a92e96beb305c8c4e
+SIZE (MrAlex94-Waterfox-56.0.4-20-ge03e284b083d_GH0.tar.gz) = 394048388
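(For reference: with the ports framework's USE_GITHUB support, DISTVERSION=56.0.4-20 combined with DISTVERSIONSUFFIX=-ge03e284b083d composes the MrAlex94-Waterfox-56.0.4-20-ge03e284b083d_GH0.tar.gz distfile name checked above; the suffix is the abbreviated commit hash in git-describe form. This assumes the standard GH_ACCOUNT-GH_PROJECT-version_GH0 naming scheme. The TIMESTAMP/SHA256/SIZE triplet is regenerated with "make makesum".)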
Modified: head/www/waterfox/files/patch-bug1388020
==============================================================================
--- head/www/waterfox/files/patch-bug1388020 Thu Feb 22 19:51:15 2018 (r462634)
+++ head/www/waterfox/files/patch-bug1388020 Thu Feb 22 19:51:16 2018 (r462635)
@@ -74,7 +74,7 @@ index a48a9081e155..e0a74920fbe5 100644
MOZ_ASSERT(aExternalImageId.isSome());
result = new WebRenderTextureHost(aDesc, aFlags, result, aExternalImageId.ref());
}
-@@ -269,13 +268,50 @@ CreateBackendIndependentTextureHost(const SurfaceDescriptor& aDesc,
+@@ -269,13 +268,49 @@ CreateBackendIndependentTextureHost(const SurfaceDescriptor& aDesc,
const MemoryOrShmem& data = bufferDesc.data();
switch (data.type()) {
case MemoryOrShmem::TShmem: {
@@ -98,8 +98,7 @@ index a48a9081e155..e0a74920fbe5 100644
+ case BufferDescriptor::TYCbCrDescriptor: {
+ const YCbCrDescriptor& ycbcr = desc.get_YCbCrDescriptor();
+ reqSize =
-+ ImageDataSerializer::ComputeYCbCrBufferSize(ycbcr.ySize(), ycbcr.ySize().width,
-+ ycbcr.cbCrSize(), ycbcr.cbCrSize().width);
++ ImageDataSerializer::ComputeYCbCrBufferSize(ycbcr.ySize(), ycbcr.cbCrSize());
+ break;
+ }
+ case BufferDescriptor::TRGBDescriptor: {
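(Aside: the hunk above tracks an upstream signature change; ImageDataSerializer::ComputeYCbCrBufferSize now takes just the two plane sizes instead of explicit per-plane strides. As a simplified sketch, assuming tightly packed rows where stride equals width (the real helper also accounts for stride alignment), the computation amounts to:

#include <stddef.h>

/* Hypothetical, simplified analogue of ComputeYCbCrBufferSize():
 * one Y plane plus two chroma (Cb, Cr) planes, rows tightly packed. */
static size_t
ycbcr_buffer_size(size_t y_w, size_t y_h, size_t cbcr_w, size_t cbcr_h)
{
    return y_w * y_h + 2 * (cbcr_w * cbcr_h);
}
)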
Added: head/www/waterfox/files/patch-bug1398021
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ head/www/waterfox/files/patch-bug1398021 Thu Feb 22 19:51:16 2018 (r462635)
@@ -0,0 +1,2737 @@
+commit 909ebdebd850
+Author: Ryan VanderMeulen <ryanvm@gmail.com>
+Date: Tue Sep 12 09:00:37 2017 -0400
+
+ Bug 1398021 - Update lz4 to version 1.8.0. r=froydnj
+---
+ mfbt/Compression.cpp | 7 +-
+ mfbt/lz4.c | 1593 ++++++++++++++++++++++++++++++--------------------
+ mfbt/lz4.h | 531 +++++++++++------
+ 3 files changed, 1319 insertions(+), 812 deletions(-)
+
+diff --git mfbt/Compression.cpp mfbt/Compression.cpp
+index 3f5fff53c425..ed0bb4ef72c7 100644
+--- mfbt/Compression.cpp
++++ mfbt/Compression.cpp
+@@ -36,7 +36,8 @@ LZ4::compress(const char* aSource, size_t aInputSize, char* aDest)
+ {
+ CheckedInt<int> inputSizeChecked = aInputSize;
+ MOZ_ASSERT(inputSizeChecked.isValid());
+- return LZ4_compress(aSource, aDest, inputSizeChecked.value());
++ return LZ4_compress_default(aSource, aDest, inputSizeChecked.value(),
++ LZ4_compressBound(inputSizeChecked.value()));
+ }
+
+ size_t
+@@ -47,8 +48,8 @@ LZ4::compressLimitedOutput(const char* aSource, size_t aInputSize, char* aDest,
+ MOZ_ASSERT(inputSizeChecked.isValid());
+ CheckedInt<int> maxOutputSizeChecked = aMaxOutputSize;
+ MOZ_ASSERT(maxOutputSizeChecked.isValid());
+- return LZ4_compress_limitedOutput(aSource, aDest, inputSizeChecked.value(),
+- maxOutputSizeChecked.value());
++ return LZ4_compress_default(aSource, aDest, inputSizeChecked.value(),
++ maxOutputSizeChecked.value());
+ }
+
+ bool
+diff --git mfbt/lz4.c mfbt/lz4.c
+index c416fe815a42..5752b4d17b0e 100644
+--- mfbt/lz4.c
++++ mfbt/lz4.c
+@@ -1,6 +1,7 @@
+ /*
+ LZ4 - Fast LZ compression algorithm
+- Copyright (C) 2011-2014, Yann Collet.
++ Copyright (C) 2011-2017, Yann Collet.
++
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+@@ -27,118 +28,96 @@
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+- - LZ4 source repository : http://code.google.com/p/lz4/
+- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
++ - LZ4 homepage : http://www.lz4.org
++ - LZ4 source repository : https://github.com/lz4/lz4
+ */
+
+-/**************************************
+- Tuning parameters
++
++/*-************************************
++* Tuning parameters
+ **************************************/
+ /*
+- * HEAPMODE :
++ * LZ4_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+- * in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
++ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+-#define HEAPMODE 0
+-
+-
+-/**************************************
+- CPU Feature Detection
+-**************************************/
+-/* 32 or 64 bits ? */
+-#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
+- || defined(__powerpc64__) || defined(__powerpc64le__) \
+- || defined(__ppc64__) || defined(__ppc64le__) \
+- || defined(__PPC64__) || defined(__PPC64LE__) \
+- || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) \
+- || (defined(__mips64) && defined(_ABI64))) /* Detects 64 bits mode */
+-# define LZ4_ARCH64 1
+-#else
+-# define LZ4_ARCH64 0
++#ifndef LZ4_HEAPMODE
++# define LZ4_HEAPMODE 0
+ #endif
+
+ /*
+- * Little Endian or Big Endian ?
+- * Overwrite the #define below if you know your architecture endianess
++ * ACCELERATION_DEFAULT :
++ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+-#include <stdlib.h> /* Apparently required to detect endianess */
+-#if defined (__GLIBC__)
+-# include <endian.h>
+-# if (__BYTE_ORDER == __BIG_ENDIAN)
+-# define LZ4_BIG_ENDIAN 1
++#define ACCELERATION_DEFAULT 1
++
++
++/*-************************************
++* CPU Feature Detection
++**************************************/
++/* LZ4_FORCE_MEMORY_ACCESS
++ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
++ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
++ * The below switch allow to select different access method for improved performance.
++ * Method 0 (default) : use `memcpy()`. Safe and portable.
++ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
++ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
++ * Method 2 : direct access. This method is portable but violate C standard.
++ * It can generate buggy code on targets which assembly generation depends on alignment.
++ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
++ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
++ * Prefer these methods in priority order (0 > 1 > 2)
++ */
++#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
++# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
++# define LZ4_FORCE_MEMORY_ACCESS 2
++# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
++# define LZ4_FORCE_MEMORY_ACCESS 1
+ # endif
+-#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+-# define LZ4_BIG_ENDIAN 1
+-#elif defined(__sparc) || defined(__sparc__) \
+- || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+- || defined(__hpux) || defined(__hppa) \
+- || defined(_MIPSEB) || defined(__s390__)
+-# define LZ4_BIG_ENDIAN 1
+-#else
+-/* Little Endian assumed. PDP Endian and other very rare endian format are unsupported. */
+ #endif
+
+ /*
+- * Unaligned memory access is automatically enabled for "common" CPU, such as x86.
+- * For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property
+- * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
++ * LZ4_FORCE_SW_BITCOUNT
++ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+-#if defined(__ARM_FEATURE_UNALIGNED)
+-# define LZ4_FORCE_UNALIGNED_ACCESS 1
+-#endif
+-
+-/* Define this parameter if your target system or compiler does not support hardware bit count */
+ #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+ # define LZ4_FORCE_SW_BITCOUNT
+ #endif
+
+-/*
+- * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
+- * This option may provide a small boost to performance for some big endian cpu, although probably modest.
+- * You may set this option to 1 if data will remain within closed environment.
+- * This option is useless on Little_Endian CPU (such as x86)
+- */
+
+-/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
++/*-************************************
++* Dependency
++**************************************/
++#include "lz4.h"
++/* see also "memory routines" below */
+
+
+-/**************************************
+- Compiler Options
++/*-************************************
++* Compiler Options
+ **************************************/
+-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+-/* "restrict" is a known keyword */
+-#else
+-# define restrict /* Disable restrict */
+-#endif
+-
+ #ifdef _MSC_VER /* Visual Studio */
+-# define FORCE_INLINE static __forceinline
+-# include <intrin.h> /* For Visual 2005 */
+-# if LZ4_ARCH64 /* 64-bits */
+-# pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
+-# pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */
+-# else /* 32-bits */
+-# pragma intrinsic(_BitScanForward) /* For Visual 2005 */
+-# pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
+-# endif
++# include <intrin.h>
+ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+-#else
+-# ifdef __GNUC__
+-# define FORCE_INLINE static inline __attribute__((always_inline))
+-# else
+-# define FORCE_INLINE static inline
+-# endif
+-#endif
+-
+-#ifdef _MSC_VER /* Visual Studio */
+-# define lz4_bswap16(x) _byteswap_ushort(x)
+-#else
+-# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+-#endif
+-
+-#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
++# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
++#endif /* _MSC_VER */
+
+-#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
++#ifndef FORCE_INLINE
++# ifdef _MSC_VER /* Visual Studio */
++# define FORCE_INLINE static __forceinline
++# else
++# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
++# ifdef __GNUC__
++# define FORCE_INLINE static inline __attribute__((always_inline))
++# else
++# define FORCE_INLINE static inline
++# endif
++# else
++# define FORCE_INLINE static
++# endif /* __STDC_VERSION__ */
++# endif /* _MSC_VER */
++#endif /* FORCE_INLINE */
++
++#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+ # define expect(expr,value) (__builtin_expect ((expr),(value)) )
+ #else
+ # define expect(expr,value) (expr)
+@@ -148,8 +127,8 @@
+ #define unlikely(expr) expect((expr) != 0, 0)
+
+
+-/**************************************
+- Memory routines
++/*-************************************
++* Memory routines
+ **************************************/
+ #include <stdlib.h> /* malloc, calloc, free */
+ #define ALLOCATOR(n,s) calloc(n,s)
+@@ -158,84 +137,146 @@
+ #define MEM_INIT memset
+
+
+-/**************************************
+- Includes
++/*-************************************
++* Basic Types
+ **************************************/
+-#include "lz4.h"
+-
+-
+-/**************************************
+- Basic Types
+-**************************************/
+-#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
++#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+ # include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
++ typedef uintptr_t uptrval;
+ #else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
++ typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+ #endif
+
+-#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
+-# define _PACKED __attribute__ ((packed))
++#if defined(__x86_64__)
++ typedef U64 reg_t; /* 64-bits in x32 mode */
+ #else
+-# define _PACKED
++ typedef size_t reg_t; /* 32-bits in x32 mode */
+ #endif
+
+-#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+-# if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+-# pragma pack(1)
+-# else
+-# pragma pack(push, 1)
+-# endif
+-#endif
++/*-************************************
++* Reading and writing into memory
++**************************************/
++static unsigned LZ4_isLittleEndian(void)
++{
++ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
++ return one.c[0];
++}
+
+-typedef struct { U16 v; } _PACKED U16_S;
+-typedef struct { U32 v; } _PACKED U32_S;
+-typedef struct { U64 v; } _PACKED U64_S;
+-typedef struct {size_t v;} _PACKED size_t_S;
+
+-#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+-# if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+-# pragma pack(0)
+-# else
+-# pragma pack(pop)
+-# endif
+-#endif
++#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
++/* lie to the compiler about data alignment; use with caution */
+
+-#define A16(x) (((U16_S *)(x))->v)
+-#define A32(x) (((U32_S *)(x))->v)
+-#define A64(x) (((U64_S *)(x))->v)
+-#define AARCH(x) (((size_t_S *)(x))->v)
++static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
++static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
++static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
+
++static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
++static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+-/**************************************
+- Constants
+-**************************************/
+-#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+-#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+-#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)
++#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
++
++/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
++/* currently only defined for gcc and icc */
++typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
++
++static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
++static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
++static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
++
++static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
++static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
++
++#else /* safe and portable access through memcpy() */
+
++static U16 LZ4_read16(const void* memPtr)
++{
++ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
++}
++
++static U32 LZ4_read32(const void* memPtr)
++{
++ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
++}
++
++static reg_t LZ4_read_ARCH(const void* memPtr)
++{
++ reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
++}
++
++static void LZ4_write16(void* memPtr, U16 value)
++{
++ memcpy(memPtr, &value, sizeof(value));
++}
++
++static void LZ4_write32(void* memPtr, U32 value)
++{
++ memcpy(memPtr, &value, sizeof(value));
++}
++
++#endif /* LZ4_FORCE_MEMORY_ACCESS */
++
++
++static U16 LZ4_readLE16(const void* memPtr)
++{
++ if (LZ4_isLittleEndian()) {
++ return LZ4_read16(memPtr);
++ } else {
++ const BYTE* p = (const BYTE*)memPtr;
++ return (U16)((U16)p[0] + (p[1]<<8));
++ }
++}
++
++static void LZ4_writeLE16(void* memPtr, U16 value)
++{
++ if (LZ4_isLittleEndian()) {
++ LZ4_write16(memPtr, value);
++ } else {
++ BYTE* p = (BYTE*)memPtr;
++ p[0] = (BYTE) value;
++ p[1] = (BYTE)(value>>8);
++ }
++}
++
++static void LZ4_copy8(void* dst, const void* src)
++{
++ memcpy(dst,src,8);
++}
++
++/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
++static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
++{
++ BYTE* d = (BYTE*)dstPtr;
++ const BYTE* s = (const BYTE*)srcPtr;
++ BYTE* const e = (BYTE*)dstEnd;
++
++ do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
++}
++
++
++/*-************************************
++* Common Constants
++**************************************/
+ #define MINMATCH 4
+
+-#define COPYLENGTH 8
++#define WILDCOPYLENGTH 8
+ #define LASTLITERALS 5
+-#define MFLIMIT (COPYLENGTH+MINMATCH)
++#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
+ static const int LZ4_minLength = (MFLIMIT+1);
+
+-#define KB *(1U<<10)
+-#define MB *(1U<<20)
++#define KB *(1 <<10)
++#define MB *(1 <<20)
+ #define GB *(1U<<30)
+
+-#define LZ4_64KLIMIT ((64 KB) + (MFLIMIT-1))
+-#define SKIPSTRENGTH 6 /* Increasing this value will make the compression run slower on incompressible data */
+-
+ #define MAXD_LOG 16
+ #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+
+@@ -245,208 +286,213 @@ static const int LZ4_minLength = (MFLIMIT+1);
+ #define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+-/**************************************
+- Structures and local types
++/*-************************************
++* Error detection
+ **************************************/
+-typedef struct {
+- U32 hashTable[HASH_SIZE_U32];
+- U32 currentOffset;
+- U32 initCheck;
+- const BYTE* dictionary;
+- const BYTE* bufferStart;
+- U32 dictSize;
+-} LZ4_stream_t_internal;
++#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
++
++#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
++# include <stdio.h>
++# define DEBUGLOG(l, ...) { \
++ if (l<=LZ4_DEBUG) { \
++ fprintf(stderr, __FILE__ ": "); \
++ fprintf(stderr, __VA_ARGS__); \
++ fprintf(stderr, " \n"); \
++ } }
++#else
++# define DEBUGLOG(l, ...) {} /* disabled */
++#endif
+
+-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
+-typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+-typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
++/*-************************************
++* Common functions
++**************************************/
++static unsigned LZ4_NbCommonBytes (register reg_t val)
++{
++ if (LZ4_isLittleEndian()) {
++ if (sizeof(val)==8) {
++# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ unsigned long r = 0;
++ _BitScanForward64( &r, (U64)val );
++ return (int)(r>>3);
++# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ return (__builtin_ctzll((U64)val) >> 3);
++# else
++ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
++ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
++# endif
++ } else /* 32 bits */ {
++# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ unsigned long r;
++ _BitScanForward( &r, (U32)val );
++ return (int)(r>>3);
++# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ return (__builtin_ctz((U32)val) >> 3);
++# else
++ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
++ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
++# endif
++ }
++ } else /* Big Endian CPU */ {
++ if (sizeof(val)==8) {
++# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ unsigned long r = 0;
++ _BitScanReverse64( &r, val );
++ return (unsigned)(r>>3);
++# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ return (__builtin_clzll((U64)val) >> 3);
++# else
++ unsigned r;
++ if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
++ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
++ r += (!val);
++ return r;
++# endif
++ } else /* 32 bits */ {
++# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ unsigned long r = 0;
++ _BitScanReverse( &r, (unsigned long)val );
++ return (unsigned)(r>>3);
++# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
++ return (__builtin_clz((U32)val) >> 3);
++# else
++ unsigned r;
++ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
++ r += (!val);
++ return r;
++# endif
++ }
++ }
++}
+
+-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
++#define STEPSIZE sizeof(reg_t)
++static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
++{
++ const BYTE* const pStart = pIn;
+
++ while (likely(pIn<pInLimit-(STEPSIZE-1))) {
++ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
++ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
++ pIn += LZ4_NbCommonBytes(diff);
++ return (unsigned)(pIn - pStart);
++ }
+
+-/**************************************
+- Architecture-specific macros
+-**************************************/
+-#define STEPSIZE sizeof(size_t)
+-#define LZ4_COPYSTEP(d,s) { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
+-#define LZ4_COPY8(d,s) { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }
+-
+-#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
+-# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+-# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+-#else /* Little Endian */
+-# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+-# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
+-#endif
++ if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
++ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
++ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
++ return (unsigned)(pIn - pStart);
++}
+
+
+-/**************************************
+- Macros
++#ifndef LZ4_COMMONDEFS_ONLY
++/*-************************************
++* Local Constants
+ **************************************/
+-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */
+-#if LZ4_ARCH64 || !defined(__GNUC__)
+-# define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
+-#else
+-# define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
+-#endif
++static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
++static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
+
+
+-/****************************
+- Private local functions
+-****************************/
+-#if LZ4_ARCH64
++/*-************************************
++* Local Structures and types
++**************************************/
++typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
++typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+-int LZ4_NbCommonBytes (register U64 val)
+-{
+-# if defined(LZ4_BIG_ENDIAN)
+-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- unsigned long r = 0;
+- _BitScanReverse64( &r, val );
+- return (int)(r>>3);
+-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- return (__builtin_clzll(val) >> 3);
+-# else
+- int r;
+- if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+- r += (!val);
+- return r;
+-# endif
+-# else
+-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- unsigned long r = 0;
+- _BitScanForward64( &r, val );
+- return (int)(r>>3);
+-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- return (__builtin_ctzll(val) >> 3);
+-# else
+- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+-# endif
+-# endif
+-}
++typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
++typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+-#else
++typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
++typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+-int LZ4_NbCommonBytes (register U32 val)
+-{
+-# if defined(LZ4_BIG_ENDIAN)
+-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- unsigned long r = 0;
+- _BitScanReverse( &r, val );
+- return (int)(r>>3);
+-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- return (__builtin_clz(val) >> 3);
+-# else
+- int r;
+- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+- r += (!val);
+- return r;
+-# endif
+-# else
+-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- unsigned long r;
+- _BitScanForward( &r, val );
+- return (int)(r>>3);
+-# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+- return (__builtin_ctz(val) >> 3);
+-# else
+- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+-# endif
+-# endif
+-}
+
+-#endif
++/*-************************************
++* Local Utils
++**************************************/
++int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
++const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
++int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
++int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+
+
+-/********************************
+- Compression functions
++/*-******************************
++* Compression functions
+ ********************************/
+-int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+-
+-static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
++static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+ {
+ if (tableType == byU16)
+- return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
++ return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+- return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
++ return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+ }
+
+-static int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
++static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
++{
++ static const U64 prime5bytes = 889523592379ULL;
++ static const U64 prime8bytes = 11400714785074694791ULL;
++ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
++ if (LZ4_isLittleEndian())
++ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
++ else
++ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
++}
++
++FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
++{
++ if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
++ return LZ4_hash4(LZ4_read32(p), tableType);
++}
+
+-static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
++static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
+ {
+ switch (tableType)
+ {
+- case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
+- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
+- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
++ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
++ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
++ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
+ }
+ }
+
+-static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
++FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+- U32 h = LZ4_hashPosition(p, tableType);
++ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+ }
+
+ static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+ if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
+- if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
+- { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
++ if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
++ { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+ }
+
+-static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
++FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+ {
+- U32 h = LZ4_hashPosition(p, tableType);
++ U32 const h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+ }
+
+-static unsigned LZ4_count(const BYTE* pIn, const BYTE* pRef, const BYTE* pInLimit)
+-{
+- const BYTE* const pStart = pIn;
+-
+- while (likely(pIn<pInLimit-(STEPSIZE-1)))
+- {
+- size_t diff = AARCH(pRef) ^ AARCH(pIn);
+- if (!diff) { pIn+=STEPSIZE; pRef+=STEPSIZE; continue; }
+- pIn += LZ4_NbCommonBytes(diff);
+- return (unsigned)(pIn - pStart);
+- }
+- if (sizeof(void*)==8) if ((pIn<(pInLimit-3)) && (A32(pRef) == A32(pIn))) { pIn+=4; pRef+=4; }
+- if ((pIn<(pInLimit-1)) && (A16(pRef) == A16(pIn))) { pIn+=2; pRef+=2; }
+- if ((pIn<pInLimit) && (*pRef == *pIn)) pIn++;
+-
+- return (unsigned)(pIn - pStart);
+-}
+-
+
+-static int LZ4_compress_generic(
+- void* ctx,
+- const char* source,
+- char* dest,
+- int inputSize,
+- int maxOutputSize,
+-
+- limitedOutput_directive outputLimited,
+- tableType_t tableType,
+- dict_directive dict,
+- dictIssue_directive dictIssue)
++/** LZ4_compress_generic() :
++ inlined, to ensure branches are decided at compilation time */
++FORCE_INLINE int LZ4_compress_generic(
++ LZ4_stream_t_internal* const cctx,
++ const char* const source,
++ char* const dest,
++ const int inputSize,
++ const int maxOutputSize,
++ const limitedOutput_directive outputLimited,
++ const tableType_t tableType,
++ const dict_directive dict,
++ const dictIssue_directive dictIssue,
++ const U32 acceleration)
+ {
+- LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;
+-
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* base;
+ const BYTE* lowLimit;
+- const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
+- const BYTE* const dictionary = dictPtr->dictionary;
+- const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
+- const size_t dictDelta = dictEnd - (const BYTE*)source;
++ const BYTE* const lowRefLimit = ip - cctx->dictSize;
++ const BYTE* const dictionary = cctx->dictionary;
++ const BYTE* const dictEnd = dictionary + cctx->dictSize;
++ const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+@@ -455,12 +501,10 @@ static int LZ4_compress_generic(
+ BYTE* op = (BYTE*) dest;
+ BYTE* const olimit = op + maxOutputSize;
+
+- const int skipStrength = SKIPSTRENGTH;
+ U32 forwardH;
+- size_t refDelta=0;
+
+ /* Init conditions */
+- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
++ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */
+ switch(dict)
+ {
+ case noDict:
+@@ -469,125 +513,118 @@ static int LZ4_compress_generic(
+ lowLimit = (const BYTE*)source;
+ break;
+ case withPrefix64k:
+- base = (const BYTE*)source - dictPtr->currentOffset;
+- lowLimit = (const BYTE*)source - dictPtr->dictSize;
++ base = (const BYTE*)source - cctx->currentOffset;
++ lowLimit = (const BYTE*)source - cctx->dictSize;
+ break;
+ case usingExtDict:
+- base = (const BYTE*)source - dictPtr->currentOffset;
++ base = (const BYTE*)source - cctx->currentOffset;
+ lowLimit = (const BYTE*)source;
+ break;
+ }
+- if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0; /* Size too large (not within 64K limit) */
+- if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
++ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
++ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+- LZ4_putPosition(ip, ctx, tableType, base);
++ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+- for ( ; ; )
+- {
+- const BYTE* ref;
++ for ( ; ; ) {
++ ptrdiff_t refDelta = 0;
++ const BYTE* match;
+ BYTE* token;
+- {
+- const BYTE* forwardIp = ip;
+- unsigned step=1;
+- unsigned searchMatchNb = (1U << skipStrength);
+
+- /* Find a match */
++ /* Find a match */
++ { const BYTE* forwardIp = ip;
++ unsigned step = 1;
++ unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+- U32 h = forwardH;
++ U32 const h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+- step = searchMatchNb++ >> skipStrength;
+- //if (step>8) step=8; // required for valid forwardIp ; slows down uncompressible data a bit
++ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimit)) goto _last_literals;
+
+- ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
+- if (dict==usingExtDict)
+- {
+- if (ref<(const BYTE*)source)
+- {
++ match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
++ if (dict==usingExtDict) {
++ if (match < (const BYTE*)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+- }
+- else
+- {
++ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE*)source;
+- }
+- }
++ } }
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+- LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
++ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+- } while ( ((dictIssue==dictSmall) ? (ref < lowRefLimit) : 0)
+- || ((tableType==byU16) ? 0 : (ref + MAX_DISTANCE < ip))
+- || (A32(ref+refDelta) != A32(ip)) );
++ } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
++ || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
++ || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+ }
+
+ /* Catch up */
+- while ((ip>anchor) && (ref+refDelta > lowLimit) && (unlikely(ip[-1]==ref[refDelta-1]))) { ip--; ref--; }
++ while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+
+- {
+- /* Encode Literal length */
+- unsigned litLength = (unsigned)(ip - anchor);
++ /* Encode Literals */
++ { unsigned const litLength = (unsigned)(ip - anchor);
+ token = op++;
+- if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
+- return 0; /* Check output limit */
+- if (litLength>=RUN_MASK)
+- {
++ if ((outputLimited) && /* Check output buffer overflow */
++ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
++ return 0;
++ if (litLength >= RUN_MASK) {
+ int len = (int)litLength-RUN_MASK;
+- *token=(RUN_MASK<<ML_BITS);
++ *token = (RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+- { BYTE* end = op+litLength; LZ4_WILDCOPY(op,anchor,end); op=end; }
++ LZ4_wildCopy(op, anchor, op+litLength);
++ op+=litLength;
+ }
+
+ _next_match:
+ /* Encode Offset */
+- LZ4_WRITE_LITTLEENDIAN_16(op, (U16)(ip-ref));
++ LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+
+ /* Encode MatchLength */
+- {
+- unsigned matchLength;
++ { unsigned matchCode;
+
+- if ((dict==usingExtDict) && (lowLimit==dictionary))
+- {
++ if ((dict==usingExtDict) && (lowLimit==dictionary)) {
+ const BYTE* limit;
+- ref += refDelta;
+- limit = ip + (dictEnd-ref);
++ match += refDelta;
++ limit = ip + (dictEnd-match);
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
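(Aside: the bulk of the truncated lz4.c rework replaces the old packed-struct unaligned access macros with memcpy()-based readers, selected by LZ4_FORCE_MEMORY_ACCESS. A minimal standalone sketch of the default method 0, not taken from the patch itself:

#include <stdint.h>
#include <string.h>

/* Portable unaligned 32-bit read: memcpy() is well-defined for any
 * alignment, and modern compilers lower it to a single load. */
static uint32_t
read32_unaligned(const void *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof v);
    return v;
}
)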