svn commit: r317449 - in vendor/compiler-rt/dist: include/sanitizer lib/builtins lib/lsan lib/sanitizer_common lib/scudo lib/tsan/rtl test test/asan/TestCases/Linux test/asan/TestCases/Posix test/a...

Dimitry Andric dim at FreeBSD.org
Wed Apr 26 19:24:24 UTC 2017


Author: dim
Date: Wed Apr 26 19:24:20 2017
New Revision: 317449
URL: https://svnweb.freebsd.org/changeset/base/317449

Log:
  Vendor import of compiler-rt trunk r301441:
  https://llvm.org/svn/llvm-project/compiler-rt/trunk@301441

Added:
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/textdomain.c   (contents, props changed)
  vendor/compiler-rt/dist/test/cfi/cross-dso/icall/dlopen.cpp   (contents, props changed)
  vendor/compiler-rt/dist/test/tsan/Darwin/deadlock.mm
  vendor/compiler-rt/dist/test/tsan/Darwin/external-dups.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/tsan/Darwin/external-ignore-noninstrumented.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/tsan/Darwin/external-lib.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/tsan/Darwin/external-noninstrumented-module.cc   (contents, props changed)
Deleted:
  vendor/compiler-rt/dist/test/cfi/cross-dso/dlopen.cpp
Modified:
  vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h
  vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt
  vendor/compiler-rt/dist/lib/builtins/emutls.c
  vendor/compiler-rt/dist/lib/lsan/lsan_allocator.h
  vendor/compiler-rt/dist/lib/lsan/lsan_common.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_common.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc
  vendor/compiler-rt/dist/test/CMakeLists.txt
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/read_binary_name_regtest.c
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/strchr.c
  vendor/compiler-rt/dist/test/asan/TestCases/Windows/dll_global_dead_strip.c
  vendor/compiler-rt/dist/test/asan/TestCases/Windows/fuse-lld.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Windows/global_dead_strip.c
  vendor/compiler-rt/dist/test/asan/android_commands/android_run.py
  vendor/compiler-rt/dist/test/cfi/CMakeLists.txt
  vendor/compiler-rt/dist/test/cfi/create-derivers.test
  vendor/compiler-rt/dist/test/cfi/cross-dso/icall/lit.local.cfg
  vendor/compiler-rt/dist/test/cfi/cross-dso/stats.cpp
  vendor/compiler-rt/dist/test/cfi/icall/lit.local.cfg
  vendor/compiler-rt/dist/test/cfi/lit.cfg
  vendor/compiler-rt/dist/test/cfi/lit.site.cfg.in
  vendor/compiler-rt/dist/test/lit.common.cfg
  vendor/compiler-rt/dist/test/lit.common.configured.in
  vendor/compiler-rt/dist/test/lsan/lit.common.cfg
  vendor/compiler-rt/dist/test/safestack/lit.cfg
  vendor/compiler-rt/dist/test/tsan/Darwin/debug_external.cc
  vendor/compiler-rt/dist/test/tsan/Darwin/external.cc
  vendor/compiler-rt/dist/test/tsan/test.h
  vendor/compiler-rt/dist/test/tsan/unaligned_race.cc

Modified: vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h
==============================================================================
--- vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/include/sanitizer/tsan_interface.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -114,6 +114,21 @@ void __tsan_mutex_post_signal(void *addr
 void __tsan_mutex_pre_divert(void *addr, unsigned flags);
 void __tsan_mutex_post_divert(void *addr, unsigned flags);
 
+// External race detection API.
+// Can be used by non-instrumented libraries to detect when their objects are
+// being used in an unsafe manner.
+//   - __tsan_external_read/__tsan_external_write annotates the logical reads
+//       and writes of the object at the specified address. 'caller_pc' should
+//       be the PC of the library user, which the library can obtain with e.g.
+//       `__builtin_return_address(0)`.
+//   - __tsan_external_register_tag registers a 'tag' with the specified name,
+//       which is later used in read/write annotations to denote the object type
+//   - __tsan_external_assign_tag can optionally mark a heap object with a tag
+void *__tsan_external_register_tag(const char *object_type);
+void __tsan_external_assign_tag(void *addr, void *tag);
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
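
As a reference point, here is a minimal sketch of how a non-instrumented library might drive this API. Only the __tsan_external_* calls and the <sanitizer/tsan_interface.h> header come from the interface above; the "MyCollection" type and its functions are hypothetical.

  /* Library code built without -fsanitize=thread, loaded into a program
   * whose main binary is TSan-instrumented (so the __tsan_external_*
   * symbols resolve against the runtime at load time). */
  #include <sanitizer/tsan_interface.h>
  #include <stdlib.h>

  static void *collection_tag;                  /* one tag per object type */

  typedef struct { int count; } MyCollection;   /* hypothetical object */

  void my_collection_global_init(void) {        /* call once at startup */
      collection_tag = __tsan_external_register_tag("MyCollection");
  }

  MyCollection *my_collection_create(void) {
      MyCollection *c = calloc(1, sizeof *c);
      /* Optional: lets reports name this heap object as a "MyCollection". */
      __tsan_external_assign_tag(c, collection_tag);
      return c;
  }

  int my_collection_size(MyCollection *c) {
      /* Logical read of the object; the caller's PC ends up in the report. */
      __tsan_external_read(c, __builtin_return_address(0), collection_tag);
      return c->count;
  }

  void my_collection_add(MyCollection *c) {
      /* Logical write of the same object. */
      __tsan_external_write(c, __builtin_return_address(0), collection_tag);
      c->count++;
  }

If two threads call my_collection_add() and my_collection_size() on the same object without synchronization, TSan can report a race on the "MyCollection" object even though the library itself was never instrumented.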

Modified: vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt
==============================================================================
--- vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/builtins/CMakeLists.txt	Wed Apr 26 19:24:20 2017	(r317449)
@@ -164,7 +164,8 @@ set(GENERIC_SOURCES
   udivti3.c
   umoddi3.c
   umodsi3.c
-  umodti3.c)
+  umodti3.c
+  emutls.c)
 
 option(COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN
   "Skip the atomic builtin (this may be needed if system headers are unavailable)"
@@ -187,12 +188,6 @@ if(APPLE)
     atomic_thread_fence.c)
 endif()
 
-if(NOT WIN32 OR MINGW)
-  set(GENERIC_SOURCES
-      ${GENERIC_SOURCES}
-      emutls.c)
-endif()
-
 if (HAVE_UNWIND_H)
   set(GENERIC_SOURCES
       ${GENERIC_SOURCES}

Modified: vendor/compiler-rt/dist/lib/builtins/emutls.c
==============================================================================
--- vendor/compiler-rt/dist/lib/builtins/emutls.c	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/builtins/emutls.c	Wed Apr 26 19:24:20 2017	(r317449)
@@ -7,7 +7,6 @@
  *
  * ===----------------------------------------------------------------------===
  */
-#include <pthread.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
@@ -15,6 +14,23 @@
 #include "int_lib.h"
 #include "int_util.h"
 
+typedef struct emutls_address_array {
+    uintptr_t size;  /* number of elements in the 'data' array */
+    void* data[];
+} emutls_address_array;
+
+static void emutls_shutdown(emutls_address_array *array);
+
+#ifndef _WIN32
+
+#include <pthread.h>
+
+static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_key_t emutls_pthread_key;
+
+typedef unsigned int gcc_word __attribute__((mode(word)));
+typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
+
 /* Default is not to use posix_memalign, so systems like Android
  * can use thread local data without heavier POSIX memory allocators.
  */
@@ -22,26 +38,6 @@
 #define EMUTLS_USE_POSIX_MEMALIGN 0
 #endif
 
-/* For every TLS variable xyz,
- * there is one __emutls_control variable named __emutls_v.xyz.
- * If xyz has non-zero initial value, __emutls_v.xyz's "value"
- * will point to __emutls_t.xyz, which has the initial value.
- */
-typedef unsigned int gcc_word __attribute__((mode(word)));
-typedef struct __emutls_control {
-    /* Must use gcc_word here, instead of size_t, to match GCC.  When
-       gcc_word is larger than size_t, the upper extra bits are all
-       zeros.  We can use variables of size_t to operate on size and
-       align.  */
-    gcc_word size;  /* size of the object in bytes */
-    gcc_word align;  /* alignment of the object in bytes */
-    union {
-        uintptr_t index;  /* data[index-1] is the object address */
-        void* address;  /* object address, when in single thread env */
-    } object;
-    void* value;  /* null or non-zero initial value for the object */
-} __emutls_control;
-
 static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
     void *base;
 #if EMUTLS_USE_POSIX_MEMALIGN
@@ -50,7 +46,7 @@ static __inline void *emutls_memalign_al
 #else
     #define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void*))
     char* object;
-    if ((object = malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
+    if ((object = (char*)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
         abort();
     base = (void*)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES))
                     & ~(uintptr_t)(align - 1));
@@ -69,10 +65,207 @@ static __inline void emutls_memalign_fre
 #endif
 }
 
+static void emutls_key_destructor(void* ptr) {
+    emutls_shutdown((emutls_address_array*)ptr);
+    free(ptr);
+}
+
+static __inline void emutls_init(void) {
+    if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
+        abort();
+}
+
+static __inline void emutls_init_once(void) {
+    static pthread_once_t once = PTHREAD_ONCE_INIT;
+    pthread_once(&once, emutls_init);
+}
+
+static __inline void emutls_lock() {
+    pthread_mutex_lock(&emutls_mutex);
+}
+
+static __inline void emutls_unlock() {
+    pthread_mutex_unlock(&emutls_mutex);
+}
+
+static __inline void emutls_setspecific(emutls_address_array *value) {
+    pthread_setspecific(emutls_pthread_key, (void*) value);
+}
+
+static __inline emutls_address_array* emutls_getspecific() {
+    return (emutls_address_array*) pthread_getspecific(emutls_pthread_key);
+}
+
+#else
+
+#include <Windows.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <assert.h>
+#include <immintrin.h>
+
+static LPCRITICAL_SECTION emutls_mutex;
+static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
+
+typedef uintptr_t gcc_word;
+typedef void * gcc_pointer;
+
+static void win_error(DWORD last_err, const char *hint) {
+    char *buffer = NULL;
+    if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+                       FORMAT_MESSAGE_FROM_SYSTEM |
+                       FORMAT_MESSAGE_MAX_WIDTH_MASK,
+                       NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
+        fprintf(stderr, "Windows error: %s\n", buffer);
+    } else {
+        fprintf(stderr, "Unknown Windows error: %s\n", hint);

+    }
+    LocalFree(buffer);
+}
+
+static __inline void win_abort(DWORD last_err, const char *hint) {
+    win_error(last_err, hint);
+    abort();
+}
+
+static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
+    void *base = _aligned_malloc(size, align);
+    if (!base)
+        win_abort(GetLastError(), "_aligned_malloc");
+    return base;
+}
+
+static __inline void emutls_memalign_free(void *base) {
+    _aligned_free(base);
+}
+
+static void emutls_exit(void) {
+    if (emutls_mutex) {
+        DeleteCriticalSection(emutls_mutex);
+        _aligned_free(emutls_mutex);
+        emutls_mutex = NULL;
+    }
+    if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
+        emutls_shutdown((emutls_address_array*)TlsGetValue(emutls_tls_index));
+        TlsFree(emutls_tls_index);
+        emutls_tls_index = TLS_OUT_OF_INDEXES;
+    }
+}
+
+#pragma warning (push)
+#pragma warning (disable : 4100)
+static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
+    emutls_mutex = (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
+    if (!emutls_mutex) {
+        win_error(GetLastError(), "_aligned_malloc");
+        return FALSE;
+    }
+    InitializeCriticalSection(emutls_mutex);
+
+    emutls_tls_index = TlsAlloc();
+    if (emutls_tls_index == TLS_OUT_OF_INDEXES) {
+        emutls_exit();
+        win_error(GetLastError(), "TlsAlloc");
+        return FALSE;
+    }
+    atexit(&emutls_exit);
+    return TRUE;
+}
+
+static __inline void emutls_init_once(void) {
+    static INIT_ONCE once;
+    InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
+}
+
+static __inline void emutls_lock() {
+    EnterCriticalSection(emutls_mutex);
+}
+
+static __inline void emutls_unlock() {
+    LeaveCriticalSection(emutls_mutex);
+}
+
+static __inline void emutls_setspecific(emutls_address_array *value) {
+    if (TlsSetValue(emutls_tls_index, (LPVOID) value) == 0)
+        win_abort(GetLastError(), "TlsSetValue");
+}
+
+static __inline emutls_address_array* emutls_getspecific() {
+    LPVOID value = TlsGetValue(emutls_tls_index);
+    if (value == NULL) {
+        const DWORD err = GetLastError();
+        if (err != ERROR_SUCCESS)
+            win_abort(err, "TlsGetValue");
+    }
+    return (emutls_address_array*) value;
+}
+
+/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
+ */
+#if !defined(__ATOMIC_RELEASE)
+
+enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };
+
+static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
+    assert(type == __ATOMIC_ACQUIRE);
+#ifdef _WIN64
+    return (uintptr_t) _load_be_u64(ptr);
+#else
+    return (uintptr_t) _load_be_u32(ptr);
+#endif
+}
+
+static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
+    assert(type == __ATOMIC_RELEASE);
+#ifdef _WIN64
+    _store_be_u64(ptr, val);
+#else
+    _store_be_u32(ptr, val);
+#endif
+}
+
+#endif
+
+#pragma warning (pop)
+
+#endif
+
+static size_t emutls_num_object = 0;  /* number of allocated TLS objects */
+
+/* Free the allocated TLS data
+ */
+static void emutls_shutdown(emutls_address_array *array) {
+    if (array) {
+        uintptr_t i;
+        for (i = 0; i < array->size; ++i) {
+            if (array->data[i])
+                emutls_memalign_free(array->data[i]);
+        }
+    }
+}
+
+/* For every TLS variable xyz,
+ * there is one __emutls_control variable named __emutls_v.xyz.
+ * If xyz has non-zero initial value, __emutls_v.xyz's "value"
+ * will point to __emutls_t.xyz, which has the initial value.
+ */
+typedef struct __emutls_control {
+    /* Must use gcc_word here, instead of size_t, to match GCC.  When
+       gcc_word is larger than size_t, the upper extra bits are all
+       zeros.  We can use variables of size_t to operate on size and
+       align.  */
+    gcc_word size;  /* size of the object in bytes */
+    gcc_word align;  /* alignment of the object in bytes */
+    union {
+        uintptr_t index;  /* data[index-1] is the object address */
+        void* address;  /* object address, when in single thread env */
+    } object;
+    void* value;  /* null or non-zero initial value for the object */
+} __emutls_control;
+
 /* Emulated TLS objects are always allocated at run-time. */
 static __inline void *emutls_allocate_object(__emutls_control *control) {
     /* Use standard C types, check with gcc's emutls.o. */
-    typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
     COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
     COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void*));
 
@@ -93,45 +286,19 @@ static __inline void *emutls_allocate_ob
     return base;
 }
 
-static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static size_t emutls_num_object = 0;  /* number of allocated TLS objects */
-
-typedef struct emutls_address_array {
-    uintptr_t size;  /* number of elements in the 'data' array */
-    void* data[];
-} emutls_address_array;
-
-static pthread_key_t emutls_pthread_key;
-
-static void emutls_key_destructor(void* ptr) {
-    emutls_address_array* array = (emutls_address_array*)ptr;
-    uintptr_t i;
-    for (i = 0; i < array->size; ++i) {
-        if (array->data[i])
-            emutls_memalign_free(array->data[i]);
-    }
-    free(ptr);
-}
-
-static void emutls_init(void) {
-    if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
-        abort();
-}
 
 /* Returns control->object.index; set index if not allocated yet. */
 static __inline uintptr_t emutls_get_index(__emutls_control *control) {
     uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
     if (!index) {
-        static pthread_once_t once = PTHREAD_ONCE_INIT;
-        pthread_once(&once, emutls_init);
-        pthread_mutex_lock(&emutls_mutex);
+        emutls_init_once();
+        emutls_lock();
         index = control->object.index;
         if (!index) {
             index = ++emutls_num_object;
             __atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
         }
-        pthread_mutex_unlock(&emutls_mutex);
+        emutls_unlock();
     }
     return index;
 }
@@ -142,7 +309,7 @@ static __inline void emutls_check_array_
     if (array == NULL)
         abort();
     array->size = size;
-    pthread_setspecific(emutls_pthread_key, (void*)array);
+    emutls_setspecific(array);
 }
 
 /* Returns the new 'data' array size, number of elements,
@@ -156,22 +323,29 @@ static __inline uintptr_t emutls_new_dat
     return ((index + 1 + 15) & ~((uintptr_t)15)) - 1;
 }
 
+/* Returns the size in bytes required for an emutls_address_array with
+ * N number of elements for data field.
+ */
+static __inline uintptr_t emutls_asize(uintptr_t N) {
+    return N * sizeof(void *) + sizeof(emutls_address_array);
+}
+
 /* Returns the thread local emutls_address_array.
  * Extends its size if necessary to hold address at index.
  */
 static __inline emutls_address_array *
 emutls_get_address_array(uintptr_t index) {
-    emutls_address_array* array = pthread_getspecific(emutls_pthread_key);
+    emutls_address_array* array = emutls_getspecific();
     if (array == NULL) {
         uintptr_t new_size = emutls_new_data_array_size(index);
-        array = malloc(new_size * sizeof(void *) + sizeof(emutls_address_array));
+        array = (emutls_address_array*) malloc(emutls_asize(new_size));
         if (array)
             memset(array->data, 0, new_size * sizeof(void*));
         emutls_check_array_set_size(array, new_size);
     } else if (index > array->size) {
         uintptr_t orig_size = array->size;
         uintptr_t new_size = emutls_new_data_array_size(index);
-        array = realloc(array, new_size * sizeof(void *) + sizeof(emutls_address_array));
+        array = (emutls_address_array*) realloc(array, emutls_asize(new_size));
         if (array)
             memset(array->data + orig_size, 0,
                    (new_size - orig_size) * sizeof(void*));
@@ -182,8 +356,8 @@ emutls_get_address_array(uintptr_t index
 
 void* __emutls_get_address(__emutls_control* control) {
     uintptr_t index = emutls_get_index(control);
-    emutls_address_array* array = emutls_get_address_array(index);
-    if (array->data[index - 1] == NULL)
-        array->data[index - 1] = emutls_allocate_object(control);
-    return array->data[index - 1];
+    emutls_address_array* array = emutls_get_address_array(index--);
+    if (array->data[index] == NULL)
+        array->data[index] = emutls_allocate_object(control);
+    return array->data[index];
 }

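The rewritten __emutls_get_address above leans on emutls_new_data_array_size() and emutls_asize() to grow the per-thread pointer array in steps that keep the size field plus the data slots at a multiple of 16 elements. A self-contained sketch of just that arithmetic, with the formula copied from the code above and a throwaway main() added only for illustration:

  #include <stdint.h>
  #include <stdio.h>

  /* Same rounding as emutls_new_data_array_size(): pick a 'data' length so
   * that 1 (the size field) + the data slots land on a 16-element boundary. */
  static uintptr_t new_data_array_size(uintptr_t index) {
      return ((index + 1 + 15) & ~((uintptr_t)15)) - 1;
  }

  int main(void) {
      /* index 1..15  -> 15 slots (16 elements total, 128 bytes on LP64),
       * index 16..31 -> 31 slots (32 elements, 256 bytes),
       * index 32..47 -> 47 slots, and so on. */
      const uintptr_t samples[] = { 1, 15, 16, 31, 32 };
      for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++)
          printf("index %2lu -> %2lu data slots\n",
                 (unsigned long)samples[i],
                 (unsigned long)new_data_array_size(samples[i]));
      return 0;
  }
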
Modified: vendor/compiler-rt/dist/lib/lsan/lsan_allocator.h
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_allocator.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_allocator.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -59,7 +59,7 @@ typedef CompactSizeClassMap SizeClassMap
 typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
     sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
     PrimaryAllocator;
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) || defined(__powerpc64__)
 struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
   static const uptr kSpaceBeg = 0x600000000000ULL;
   static const uptr kSpaceSize =  0x40000000000ULL; // 4T.

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common.cc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common.cc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -70,12 +70,13 @@ static const char kSuppressionLeak[] = "
 static const char *kSuppressionTypes[] = { kSuppressionLeak };
 static const char kStdSuppressions[] =
 #if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
-  // The actual string allocation happens here (for more details refer to the
-  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT definition).
-  "leak:*_dl_map_object_deps*";
-#else
-  "";
+  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+  // definition.
+  "leak:*pthread_exit*\n"
 #endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+  // TLS leak in some glibc versions, described in
+  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+  "leak:*tls_get_addr*\n";
 
 void InitializeSuppressions() {
   CHECK_EQ(nullptr, suppression_ctx);
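
The entries above use the same "leak:<pattern>" syntax as user-supplied suppressions; they are just compiled in. For comparison, a hedged user-side sketch: leaky_helper and the file name lsan.supp are made up, while passing a file through LSAN_OPTIONS=suppressions=... is the standard mechanism.

  /* leaky.c -- build with -fsanitize=address (or -fsanitize=leak). */
  #include <stdlib.h>

  static void leaky_helper(void) {
      void *p = malloc(16);   /* intentionally never freed */
      (void)p;                /* LSan reports this allocation at exit */
  }

  int main(void) {
      leaky_helper();
      return 0;
  }

  /* To silence only this report, put the line
   *     leak:*leaky_helper*
   * into lsan.supp and run the binary with
   *     LSAN_OPTIONS=suppressions=lsan.supp ./leaky
   * Matching works the same way as for the built-in "leak:*tls_get_addr*"
   * entry above. */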

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common.h
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -32,7 +32,8 @@
 // new architecture inside sanitizer library.
 #if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
     (SANITIZER_WORDSIZE == 64) &&                               \
-    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+     defined(__powerpc64__))
 #define CAN_SANITIZE_LEAKS 1
 #elif defined(__i386__) && \
     (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -180,6 +180,7 @@ struct SizeClassAllocator32LocalCache {
     uptr count;
     uptr max_count;
     uptr class_size;
+    uptr class_id_for_transfer_batch;
     void *batch[2 * TransferBatch::kMaxNumCached];
   };
   PerClass per_class_[kNumClasses];
@@ -188,32 +189,31 @@ struct SizeClassAllocator32LocalCache {
   void InitCache() {
     if (per_class_[1].max_count)
       return;
+    // TransferBatch class is declared in SizeClassAllocator.
+    uptr class_id_for_transfer_batch =
+        SizeClassMap::ClassID(sizeof(TransferBatch));
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      c->max_count = 2 * TransferBatch::MaxCached(i);
+      uptr max_cached = TransferBatch::MaxCached(i);
+      c->max_count = 2 * max_cached;
       c->class_size = Allocator::ClassIdToSize(i);
+      // We transfer chunks between central and thread-local free lists in
+      // batches. For small size classes we allocate batches separately. For
+      // large size classes we may use one of the chunks to store the batch.
+      // sizeof(TransferBatch) must be a power of 2 for more efficient
+      // allocation.
+      c->class_id_for_transfer_batch = (c->class_size <
+          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
+              class_id_for_transfer_batch : 0;
     }
   }
 
-  // TransferBatch class is declared in SizeClassAllocator.
-  // We transfer chunks between central and thread-local free lists in batches.
-  // For small size classes we allocate batches separately.
-  // For large size classes we may use one of the chunks to store the batch.
-  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
-  static uptr SizeClassForTransferBatch(uptr class_id) {
-    if (Allocator::ClassIdToSize(class_id) <
-        TransferBatch::AllocationSizeRequiredForNElements(
-            TransferBatch::MaxCached(class_id)))
-      return SizeClassMap::ClassID(sizeof(TransferBatch));
-    return 0;
-  }
-
   // Returns a TransferBatch suitable for class_id.
   // For small size classes allocates the batch from the allocator.
   // For large size classes simply returns b.
   TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                              TransferBatch *b) {
-    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+    if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
       return (TransferBatch*)Allocate(allocator, batch_class_id);
     return b;
   }
@@ -223,7 +223,7 @@ struct SizeClassAllocator32LocalCache {
   // Does nothing for large size classes.
   void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                     TransferBatch *b) {
-    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+    if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
       Deallocate(allocator, batch_class_id, b);
   }
 

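The InitCache() change above hoists the "where do I allocate a TransferBatch from?" decision out of the hot path: it is computed once per size class, and CreateBatch()/DestroyBatch() become a plain field read. A minimal stand-alone sketch of that precompute-at-init pattern; kNumClasses, class_to_size(), batch_bytes_needed() and the batch class id 4 are illustrative stand-ins, not the real allocator's values:

  #include <stddef.h>

  #define kNumClasses 8

  /* Stand-ins for Allocator::ClassIdToSize() and
   * TransferBatch::AllocationSizeRequiredForNElements(). */
  static size_t class_to_size(size_t class_id) { return (size_t)16 << class_id; }
  static size_t batch_bytes_needed(size_t class_id) { (void)class_id; return 256; }

  struct PerClass {
      size_t class_size;
      size_t class_id_for_transfer_batch;   /* 0: batch is stored in a chunk */
  };
  static struct PerClass per_class[kNumClasses];

  void init_cache(void) {
      const size_t batch_class = 4;  /* stand-in for ClassID(sizeof(TransferBatch)) */
      for (size_t i = 0; i < kNumClasses; i++) {
          per_class[i].class_size = class_to_size(i);
          /* Chunks of small classes cannot hold a whole TransferBatch, so the
           * batch must come from another class; 0 means "reuse a chunk". */
          per_class[i].class_id_for_transfer_batch =
              (per_class[i].class_size < batch_bytes_needed(i)) ? batch_class : 0;
      }
  }

  /* Hot path: a table lookup instead of recomputing the threshold each call. */
  size_t batch_class_for(size_t class_id) {
      return per_class[class_id].class_id_for_transfer_batch;
  }
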
Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -304,7 +304,7 @@ INTERCEPTOR(SIZE_T, strnlen, const char 
 INTERCEPTOR(char*, textdomain, const char *domainname) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
-  COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
+  if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
   char *domain = REAL(textdomain)(domainname);
   if (domain) {
     COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
@@ -3330,7 +3330,7 @@ INTERCEPTOR(char *, strerror, int errnum
 //  * GNU version returns message pointer, which points to either buf or some
 //    static storage.
 #if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
-    SANITIZER_MAC
+    SANITIZER_MAC || SANITIZER_ANDROID
 // POSIX version. Spec is not clear on whether buf is NULL-terminated.
 // At least on OSX, buf contents are valid even when the call fails.
 INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
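
The #if above exists because of the two incompatible strerror_r flavors the comment describes: the POSIX one returns an int and always fills buf, the GNU one returns a char* that may point at static storage instead. A user-level portable wrapper usually ends up with the same split; describe_errno below is a made-up name for such a sketch:

  #include <string.h>
  #include <stdio.h>

  /* Returns a printable message regardless of which strerror_r libc provides. */
  static const char *describe_errno(int errnum, char *buf, size_t buflen) {
  #if defined(__GLIBC__) && defined(_GNU_SOURCE)
      return strerror_r(errnum, buf, buflen);   /* GNU: result may not be buf */
  #else
      /* POSIX: 0 on success, message always written into buf */
      return strerror_r(errnum, buf, buflen) == 0 ? buf : "unknown error";
  #endif
  }

  int main(void) {
      char buf[128];
      printf("%s\n", describe_errno(2, buf, sizeof buf));   /* typically ENOENT */
      return 0;
  }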

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform_limits_posix.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -83,7 +83,7 @@ namespace __sanitizer {
 #elif defined(__mips__)
   const unsigned struct_kernel_stat_sz =
                  SANITIZER_ANDROID ? FIRST_32_SECOND_64(104, 128) :
-                                     FIRST_32_SECOND_64(144, 216);
+                                     FIRST_32_SECOND_64(160, 216);
   const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
   const unsigned struct_kernel_stat_sz = 64;

Modified: vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp
==============================================================================
--- vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp	Wed Apr 26 19:24:20 2017	(r317449)
@@ -460,6 +460,38 @@ struct ScudoAllocator {
     return UserPtr;
   }
 
+  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
+  // we directly deallocate the chunk, otherwise the flow would lead to the
+  // chunk being checksummed twice, once before Put and once in Recycle, with
+  // no additional security value.
+  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
+                                   uptr Size) {
+    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
+    if (BypassQuarantine) {
+      Chunk->eraseHeader();
+      void *Ptr = Chunk->getAllocBeg(Header);
+      if (LIKELY(!ThreadTornDown)) {
+        getBackendAllocator().Deallocate(&Cache, Ptr);
+      } else {
+        SpinMutexLock Lock(&FallbackMutex);
+        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
+      }
+    } else {
+      UnpackedHeader NewHeader = *Header;
+      NewHeader.State = ChunkQuarantine;
+      Chunk->compareExchangeHeader(&NewHeader, Header);
+      if (LIKELY(!ThreadTornDown)) {
+        AllocatorQuarantine.Put(&ThreadQuarantineCache,
+                                QuarantineCallback(&Cache), Chunk, Size);
+      } else {
+        SpinMutexLock l(&FallbackMutex);
+        AllocatorQuarantine.Put(&FallbackQuarantineCache,
+                                QuarantineCallback(&FallbackAllocatorCache),
+                                Chunk, Size);
+      }
+    }
+  }
+
   // Deallocates a Chunk, which means adding it to the delayed free list (or
   // Quarantine).
   void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
@@ -499,24 +531,12 @@ struct ScudoAllocator {
       }
     }
 
-    UnpackedHeader NewHeader = OldHeader;
-    NewHeader.State = ChunkQuarantine;
-    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
-
     // If a small memory amount was allocated with a larger alignment, we want
     // to take that into account. Otherwise the Quarantine would be filled with
-    // tiny chunks, taking a lot of VA memory. This an approximation of the
+    // tiny chunks, taking a lot of VA memory. This is an approximation of the
     // usable size, that allows us to not call GetActuallyAllocatedSize.
     uptr LiableSize = Size + (OldHeader.Offset << MinAlignment);
-    if (LIKELY(!ThreadTornDown)) {
-      AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                              QuarantineCallback(&Cache), Chunk, LiableSize);
-    } else {
-      SpinMutexLock l(&FallbackMutex);
-      AllocatorQuarantine.Put(&FallbackQuarantineCache,
-                              QuarantineCallback(&FallbackAllocatorCache),
-                              Chunk, LiableSize);
-    }
+    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
   }
 
   // Reallocates a chunk. We can save on a new allocation if the new requested
@@ -541,11 +561,11 @@ struct ScudoAllocator {
                      OldPtr);
     }
     uptr UsableSize = Chunk->getUsableSize(&OldHeader);
-    UnpackedHeader NewHeader = OldHeader;
     // The new size still fits in the current chunk, and the size difference
     // is reasonable.
     if (NewSize <= UsableSize &&
         (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
+      UnpackedHeader NewHeader = OldHeader;
       NewHeader.SizeOrUnusedBytes =
                 OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
       Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
@@ -558,17 +578,7 @@ struct ScudoAllocator {
       uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
           UsableSize - OldHeader.SizeOrUnusedBytes;
       memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
-      NewHeader.State = ChunkQuarantine;
-      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
-      if (LIKELY(!ThreadTornDown)) {
-        AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                                QuarantineCallback(&Cache), Chunk, UsableSize);
-      } else {
-        SpinMutexLock l(&FallbackMutex);
-        AllocatorQuarantine.Put(&FallbackQuarantineCache,
-                                QuarantineCallback(&FallbackAllocatorCache),
-                                Chunk, UsableSize);
-      }
+      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
     }
     return NewPtr;
   }
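
The new quarantineOrDeallocateChunk() above funnels both the deallocate() and reallocate() paths through one decision: bypass the quarantine entirely when its cache size is zero, so a chunk is not checksummed twice. A simplified stand-alone sketch of that decision follows; the quarantine struct and helpers are illustrative stand-ins, not Scudo's real interfaces:

  #include <stdlib.h>

  struct quarantine {
      size_t capacity_bytes;   /* 0 means the quarantine is disabled */
      /* ...batch storage elided in this sketch... */
  };

  /* Stub: a real quarantine would batch 'ptr' and recycle (free) it later. */
  static void quarantine_put(struct quarantine *q, void *ptr, size_t size) {
      (void)q; (void)size;
      free(ptr);
  }

  /* Either delay the release through the quarantine or free immediately. */
  static void quarantine_or_free(struct quarantine *q, void *ptr, size_t size) {
      if (q->capacity_bytes == 0)
          free(ptr);                     /* bypass: no Put/Recycle round trip */
      else
          quarantine_put(q, ptr, size);  /* delayed free via the quarantine */
  }

  int main(void) {
      struct quarantine q = { 0 };                /* disabled quarantine */
      quarantine_or_free(&q, malloc(32), 32);     /* freed immediately */
      return 0;
  }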

Modified: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_external.cc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 #include "tsan_rtl.h"
+#include "tsan_interceptors.h"
 
 namespace __tsan {
 
@@ -29,6 +30,20 @@ const char *GetObjectTypeFromTag(uptr ta
   return registered_tags[tag];
 }
 
+typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
+void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
+  CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+  ThreadState *thr = cur_thread();
+  thr->external_tag = (uptr)tag;
+  if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
+  bool in_ignored_lib;
+  if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
+    access(thr, CALLERPC, (uptr)addr, kSizeLog1);
+  }
+  if (caller_pc) FuncExit(thr);
+  thr->external_tag = 0;
+}
+
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__tsan_external_register_tag(const char *object_type) {
@@ -54,24 +69,12 @@ void __tsan_external_assign_tag(void *ad
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
-  CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
-  ThreadState *thr = cur_thread();
-  thr->external_tag = (uptr)tag;
-  FuncEntry(thr, (uptr)caller_pc);
-  MemoryRead(thr, CALLERPC, (uptr)addr, kSizeLog8);
-  FuncExit(thr);
-  thr->external_tag = 0;
+  ExternalAccess(addr, caller_pc, tag, MemoryRead);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
-  CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
-  ThreadState *thr = cur_thread();
-  thr->external_tag = (uptr)tag;
-  FuncEntry(thr, (uptr)caller_pc);
-  MemoryWrite(thr, CALLERPC, (uptr)addr, kSizeLog8);
-  FuncExit(thr);
-  thr->external_tag = 0;
+  ExternalAccess(addr, caller_pc, tag, MemoryWrite);
 }
 }  // extern "C"
 

Modified: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.cc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -210,7 +210,7 @@ struct ThreadSignalContext {
 // The object is 64-byte aligned, because we want hot data to be located in
 // a single cache line if possible (it's accessed in every interceptor).
 static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
-static LibIgnore *libignore() {
+LibIgnore *libignore() {
   return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
 }
 
@@ -269,6 +269,7 @@ ScopedInterceptor::~ScopedInterceptor() 
 void ScopedInterceptor::EnableIgnores() {
   if (ignoring_) {
     ThreadIgnoreBegin(thr_, pc_, false);
+    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
     if (in_ignored_lib_) {
       DCHECK(!thr_->in_ignored_lib);
       thr_->in_ignored_lib = true;
@@ -279,6 +280,7 @@ void ScopedInterceptor::EnableIgnores() 
 void ScopedInterceptor::DisableIgnores() {
   if (ignoring_) {
     ThreadIgnoreEnd(thr_, pc_);
+    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
     if (in_ignored_lib_) {
       DCHECK(thr_->in_ignored_lib);
       thr_->in_ignored_lib = false;
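
Together with the new ThreadState::suppress_reports counter (tsan_rtl.h hunk below) and the extra check in OutputReport() (tsan_rtl_report.cc hunk below), this turns report suppression into a simple scoped counter that is only touched when ignore_noninstrumented_modules is set. A generic sketch of that scoped-counter idea; the names below are illustrative, only the counter-plus-check shape mirrors the diff:

  #include <stdio.h>

  static _Thread_local int suppress_reports;   /* per-thread nesting counter */

  void suppress_begin(void) { suppress_reports++; }   /* cf. EnableIgnores()  */
  void suppress_end(void)   { suppress_reports--; }   /* cf. DisableIgnores() */

  int output_report(const char *what) {
      if (suppress_reports)
          return 0;               /* swallow reports from suppressed regions */
      printf("report: %s\n", what);
      return 1;
  }

At runtime the real flag would be toggled the usual way for TSan flags, e.g. TSAN_OPTIONS=ignore_noninstrumented_modules=1.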

Modified: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.h
==============================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interceptors.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -19,6 +19,8 @@ class ScopedInterceptor {
   bool ignoring_;
 };
 
+LibIgnore *libignore();
+
 }  // namespace __tsan
 
 #define SCOPED_INTERCEPTOR_RAW(func, ...) \

Modified: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.cc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -169,7 +169,7 @@ static void PrintMop(const ReportMop *mo
            MopDesc(first, mop->write, mop->atomic), mop->size,
            (void *)mop->addr, thread_name(thrbuf, mop->tid));
   } else {
-    Printf("  %s access of object %s at %p by %s",
+    Printf("  %s access of %s at %p by %s",
            ExternalMopDesc(first, mop->write), object_type,
            (void *)mop->addr, thread_name(thrbuf, mop->tid));
   }
@@ -202,7 +202,7 @@ static void PrintLocation(const ReportLo
              loc->heap_chunk_size, loc->heap_chunk_start,
              thread_name(thrbuf, loc->tid));
     } else {
-      Printf("  Location is %s object of size %zu at %p allocated by %s:\n",
+      Printf("  Location is %s of size %zu at %p allocated by %s:\n",
              object_type, loc->heap_chunk_size, loc->heap_chunk_start,
              thread_name(thrbuf, loc->tid));
     }

Modified: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h
==============================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h	Wed Apr 26 19:24:20 2017	(r317449)
@@ -381,6 +381,7 @@ struct ThreadState {
   // for better performance.
   int ignore_reads_and_writes;
   int ignore_sync;
+  int suppress_reports;
   // Go does not support ignores.
 #if !SANITIZER_GO
   IgnoreSet mop_ignore_set;

Modified: vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_report.cc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -500,7 +500,7 @@ static void AddRacyStacks(ThreadState *t
 }
 
 bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
-  if (!flags()->report_bugs)
+  if (!flags()->report_bugs || thr->suppress_reports)
     return false;
   atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
   const ReportDesc *rep = srep.GetReport();

Modified: vendor/compiler-rt/dist/test/CMakeLists.txt
==============================================================================
--- vendor/compiler-rt/dist/test/CMakeLists.txt	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/CMakeLists.txt	Wed Apr 26 19:24:20 2017	(r317449)
@@ -41,47 +41,49 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS)
   if(COMPILER_RT_BUILD_BUILTINS)
     add_subdirectory(builtins)
   endif()
-  if(COMPILER_RT_HAS_ASAN)
-    add_subdirectory(asan)
+  if(COMPILER_RT_BUILD_SANITIZERS)
+    if(COMPILER_RT_HAS_ASAN)
+      add_subdirectory(asan)
+    endif()
+    if(COMPILER_RT_HAS_DFSAN)
+      add_subdirectory(dfsan)
+    endif()
+    if (COMPILER_RT_HAS_INTERCEPTION)
+      add_subdirectory(interception)
+    endif()
+    if(COMPILER_RT_HAS_LSAN)
+      add_subdirectory(lsan)
+    endif()
+    if(COMPILER_RT_HAS_MSAN)
+      add_subdirectory(msan)
+    endif()
+    if(COMPILER_RT_HAS_PROFILE)
+      add_subdirectory(profile)
+    endif()
+    if(COMPILER_RT_HAS_SANITIZER_COMMON)
+      add_subdirectory(sanitizer_common)
+    endif()
+    if(COMPILER_RT_HAS_TSAN)
+      add_subdirectory(tsan)
+    endif()
+    if(COMPILER_RT_HAS_UBSAN)
+      add_subdirectory(ubsan)
+    endif()
+    # CFI tests require diagnostic mode, which is implemented in UBSan.
+    if(COMPILER_RT_HAS_UBSAN)
+      add_subdirectory(cfi)
+    endif()
+    if(COMPILER_RT_HAS_SAFESTACK)
+      add_subdirectory(safestack)
+    endif()
+    if(COMPILER_RT_HAS_ESAN)
+      add_subdirectory(esan)
+    endif()
+    if(COMPILER_RT_HAS_SCUDO)
+      add_subdirectory(scudo)
+    endif()
   endif()
-  if(COMPILER_RT_HAS_DFSAN)
-    add_subdirectory(dfsan)
-  endif()
-  if (COMPILER_RT_HAS_INTERCEPTION)
-    add_subdirectory(interception)
-  endif()
-  if(COMPILER_RT_HAS_LSAN)
-    add_subdirectory(lsan)
-  endif()
-  if(COMPILER_RT_HAS_MSAN)
-    add_subdirectory(msan)
-  endif()
-  if(COMPILER_RT_HAS_PROFILE)
-    add_subdirectory(profile)
-  endif()
-  if(COMPILER_RT_HAS_SANITIZER_COMMON)
-    add_subdirectory(sanitizer_common)
-  endif()
-  if(COMPILER_RT_HAS_TSAN)
-    add_subdirectory(tsan)
-  endif()
-  if(COMPILER_RT_HAS_UBSAN)
-    add_subdirectory(ubsan)
-  endif()
-  # CFI tests require diagnostic mode, which is implemented in UBSan.
-  if(COMPILER_RT_HAS_UBSAN)
-    add_subdirectory(cfi)
-  endif()
-  if(COMPILER_RT_HAS_SAFESTACK)
-    add_subdirectory(safestack)
-  endif()
-  if(COMPILER_RT_HAS_ESAN)
-    add_subdirectory(esan)
-  endif()
-  if(COMPILER_RT_HAS_SCUDO)
-    add_subdirectory(scudo)
-  endif()
-  if(COMPILER_RT_HAS_XRAY)
+  if(COMPILER_RT_BUILD_XRAY AND COMPILER_RT_HAS_XRAY)
     add_subdirectory(xray)
   endif()
 endif()

Modified: vendor/compiler-rt/dist/test/asan/TestCases/Linux/read_binary_name_regtest.c
==============================================================================
--- vendor/compiler-rt/dist/test/asan/TestCases/Linux/read_binary_name_regtest.c	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/asan/TestCases/Linux/read_binary_name_regtest.c	Wed Apr 26 19:24:20 2017	(r317449)
@@ -3,6 +3,7 @@
 // This test uses seccomp-BPF to restrict the readlink() system call and makes
 // sure ASan is still able to
 // RUN: not ls /usr/include/linux/seccomp.h || ( %clang_asan %s -o %t && not %run %t 2>&1 | FileCheck %s )
+// REQUIRES: shell
 // UNSUPPORTED: android
 
 #include <errno.h>

Added: vendor/compiler-rt/dist/test/asan/TestCases/Linux/textdomain.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/compiler-rt/dist/test/asan/TestCases/Linux/textdomain.c	Wed Apr 26 19:24:20 2017	(r317449)
@@ -0,0 +1,10 @@
+// RUN: %clang_asan -O0 -g %s -o %t
+// RUN: %env_asan_opts=strict_string_checks=1 %run %t
+
+#include <stdlib.h>
+#include <libintl.h>
+
+int main() {
+  textdomain(NULL);
+  return 0;
+}

Modified: vendor/compiler-rt/dist/test/asan/TestCases/Posix/strchr.c
==============================================================================
--- vendor/compiler-rt/dist/test/asan/TestCases/Posix/strchr.c	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/asan/TestCases/Posix/strchr.c	Wed Apr 26 19:24:20 2017	(r317449)
@@ -27,9 +27,7 @@ int main(int argc, char **argv) {
   if (mprotect(p + 1, 1, PROT_NONE))
     return 1;
   char *r = strchr(s, 'x');
-  // CHECK: AddressSanitizer: SEGV on unknown address
-  // CHECK: The signal is caused by a READ memory access
-  // CHECK: strchr.c:[[@LINE-3]]
+  // CHECK: AddressSanitizer: {{SEGV|BUS}} on unknown address
   assert(r == p);
 
   return 0;

Modified: vendor/compiler-rt/dist/test/asan/TestCases/Windows/dll_global_dead_strip.c
==============================================================================
--- vendor/compiler-rt/dist/test/asan/TestCases/Windows/dll_global_dead_strip.c	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/asan/TestCases/Windows/dll_global_dead_strip.c	Wed Apr 26 19:24:20 2017	(r317449)
@@ -1,8 +1,8 @@
 // RUN: %clang_cl_asan -O0 %p/dll_host.cc -Fe%t
 //
-// RUN: %clang_cl_asan -LD -O0 %s -Fe%t.dll
+// RUN: %clang_cl_asan /Gw -LD -O0 %s -Fe%t.dll
 // RUN: %env_asan_opts=report_globals=2 %run %t %t.dll 2>&1 | FileCheck %s --check-prefix=NOSTRIP
-// RUN: %clang_cl_asan -LD -O2 %s -Fe%t.dll -link -opt:ref
+// RUN: %clang_cl_asan /Gw -LD -O2 %s -Fe%t.dll -link -opt:ref
 // RUN: %env_asan_opts=report_globals=2 %run %t %t.dll 2>&1 | FileCheck %s --check-prefix=STRIP
 
 #include <stdio.h>

Modified: vendor/compiler-rt/dist/test/asan/TestCases/Windows/fuse-lld.cc
==============================================================================
--- vendor/compiler-rt/dist/test/asan/TestCases/Windows/fuse-lld.cc	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/asan/TestCases/Windows/fuse-lld.cc	Wed Apr 26 19:24:20 2017	(r317449)
@@ -1,6 +1,6 @@
 // If we have LLD, see that things more or less work.
 //
-// REQUIRES: lld
+// REQUIRES: lld-available
 //
 // FIXME: Use -fuse-ld=lld after the old COFF linker is removed.
 // FIXME: Test will fail until we add flags for requesting dwarf or cv.

Modified: vendor/compiler-rt/dist/test/asan/TestCases/Windows/global_dead_strip.c
==============================================================================
--- vendor/compiler-rt/dist/test/asan/TestCases/Windows/global_dead_strip.c	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/asan/TestCases/Windows/global_dead_strip.c	Wed Apr 26 19:24:20 2017	(r317449)
@@ -1,6 +1,6 @@
-// RUN: %clang_cl_asan /O0 %s /Fe%t.exe
+// RUN: %clang_cl_asan /Gw /O0 %s /Fe%t.exe
 // RUN: %env_asan_opts=report_globals=2 %t.exe 2>&1 | FileCheck %s --check-prefix=NOSTRIP
-// RUN: %clang_cl_asan /O2 %s /Fe%t.exe -link -opt:ref
+// RUN: %clang_cl_asan /Gw /O2 %s /Fe%t.exe -link -opt:ref
 // RUN: %env_asan_opts=report_globals=2 %t.exe 2>&1 | FileCheck %s --check-prefix=STRIP
 
 #include <stdio.h>

Modified: vendor/compiler-rt/dist/test/asan/android_commands/android_run.py
==============================================================================
--- vendor/compiler-rt/dist/test/asan/android_commands/android_run.py	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/asan/android_commands/android_run.py	Wed Apr 26 19:24:20 2017	(r317449)
@@ -18,15 +18,14 @@ def build_env():
     return ' '.join(args)
 
 is_64bit = (subprocess.check_output(['file', sys.argv[0] + '.real']).find('64-bit') != -1)
-asanwrapper = "" if is_64bit else "asanwrapper "
 
 device_env = build_env()
 device_args = ' '.join(sys.argv[1:]) # FIXME: escape?
 device_stdout = device_binary + '.stdout'
 device_stderr = device_binary + '.stderr'
 device_exitcode = device_binary + '.exitcode'
-ret = adb(['shell', 'cd %s && %s %s%s %s >%s 2>%s ; echo $? >%s' %
-           (ANDROID_TMPDIR, device_env, asanwrapper, device_binary, device_args,
+ret = adb(['shell', 'cd %s && %s %s %s >%s 2>%s ; echo $? >%s' %
+           (ANDROID_TMPDIR, device_env, device_binary, device_args,
             device_stdout, device_stderr, device_exitcode)])
 if ret != 0:
     sys.exit(ret)

Modified: vendor/compiler-rt/dist/test/cfi/CMakeLists.txt
==============================================================================
--- vendor/compiler-rt/dist/test/cfi/CMakeLists.txt	Wed Apr 26 19:24:17 2017	(r317448)
+++ vendor/compiler-rt/dist/test/cfi/CMakeLists.txt	Wed Apr 26 19:24:20 2017	(r317449)
@@ -1,14 +1,48 @@
-set(CFI_LIT_TEST_MODE Standalone)
-configure_lit_site_cfg(
-  ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
-  ${CMAKE_CURRENT_BINARY_DIR}/Standalone/lit.site.cfg
-  )

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

