diff --git a/zircon/kernel/arch/x86/hypervisor/pvclock.cpp b/zircon/kernel/arch/x86/hypervisor/pvclock.cpp index fe67eeb818f3f5095d4b34d830804006586ec974..ea12b50644790991360c84f137a6e215f7affd5c 100644 --- a/zircon/kernel/arch/x86/hypervisor/pvclock.cpp +++ b/zircon/kernel/arch/x86/hypervisor/pvclock.cpp @@ -11,6 +11,7 @@ #include <hypervisor/guest_physical_address_space.h> #include <platform.h> #include <vm/physmap.h> +#include <zircon/thread_annotations.h> #include <zircon/types.h> namespace { @@ -71,7 +72,7 @@ zx_status_t pvclock_update_boot_time(hypervisor::GuestPhysicalAddressSpace* gpas // VCPUs, but documentation doesn't mention that it cannot happen and moreover it properly // protects per VCPU system time. Therefore to be on the safer side we use one global mutex // for protection. - static uint32_t version __TA_GUARDED(UpdateBootTimeLock::Get()); + static uint32_t version TA_GUARDED(UpdateBootTimeLock::Get()); hypervisor::GuestPtr guest_ptr; zx_status_t status = gpas->CreateGuestPtr(guest_paddr, sizeof(pvclock_boot_time), diff --git a/zircon/kernel/include/kernel/mutex.h b/zircon/kernel/include/kernel/mutex.h index 717e5073f0c0f4d42f9def510e756844207af253..fffc11515c09742cce076cb79709f43f928e9097 100644 --- a/zircon/kernel/include/kernel/mutex.h +++ b/zircon/kernel/include/kernel/mutex.h @@ -18,10 +18,11 @@ #include <ktl/atomic.h> #include <stdint.h> #include <zircon/compiler.h> +#include <zircon/thread_annotations.h> // Kernel mutex support. // -class __TA_CAPABILITY("mutex") Mutex { +class TA_CAP("mutex") Mutex { public: constexpr Mutex() = default; ~Mutex(); @@ -29,11 +30,11 @@ public: // No moving or copying allowed. 
DISALLOW_COPY_ASSIGN_AND_MOVE(Mutex); - void Acquire() __TA_ACQUIRE() __TA_EXCLUDES(thread_lock); - void Release() __TA_RELEASE() __TA_EXCLUDES(thread_lock); + void Acquire() TA_ACQ() TA_EXCL(thread_lock); + void Release() TA_REL() TA_EXCL(thread_lock); // Special version of Release which operates with the thread lock held - void ReleaseThreadLocked(const bool allow_reschedule) __TA_RELEASE() __TA_REQUIRES(thread_lock); + void ReleaseThreadLocked(const bool allow_reschedule) TA_REL() TA_REQ(thread_lock); // does the current thread hold the mutex? bool IsHeld() const { @@ -51,7 +52,7 @@ private: static constexpr uintptr_t STATE_FLAG_CONTESTED = 1u; template <ThreadLockState TLS> - void ReleaseInternal(const bool allow_reschedule) __TA_RELEASE() __TA_NO_THREAD_SAFETY_ANALYSIS; + void ReleaseInternal(const bool allow_reschedule) TA_REL() TA_NO_THREAD_SAFETY_ANALYSIS; // Accessors to extract the holder pointer from the val member uintptr_t val() const { @@ -80,12 +81,12 @@ struct MutexPolicy { // Basic acquire and release operations. 
template <typename LockType> - static bool Acquire(LockType* lock, State*) __TA_ACQUIRE(lock) __TA_EXCLUDES(thread_lock) { + static bool Acquire(LockType* lock, State*) TA_ACQ(lock) TA_EXCL(thread_lock) { lock->Acquire(); return true; } template <typename LockType> - static void Release(LockType* lock, State*) __TA_RELEASE(lock) __TA_EXCLUDES(thread_lock) { + static void Release(LockType* lock, State*) TA_REL(lock) TA_EXCL(thread_lock) { lock->Release(); } @@ -110,7 +111,7 @@ struct MutexPolicy { State*, SelectThreadLockHeld, RescheduleOption reschedule = Reschedule) - __TA_RELEASE(lock) __TA_REQUIRES(thread_lock) { + TA_REL(lock) TA_REQ(thread_lock) { lock->ReleaseThreadLocked(reschedule); } }; diff --git a/zircon/kernel/lib/heap/cmpctmalloc/include/lib/cmpctmalloc.h b/zircon/kernel/lib/heap/cmpctmalloc/include/lib/cmpctmalloc.h index 25337f28b3dfa010f269303bae1c0747f1a4d021..324f3832b81d46960f88c8a1e449e5fba8ca7fbe 100644 --- a/zircon/kernel/lib/heap/cmpctmalloc/include/lib/cmpctmalloc.h +++ b/zircon/kernel/lib/heap/cmpctmalloc/include/lib/cmpctmalloc.h @@ -12,16 +12,17 @@ #include <stddef.h> #include <zircon/compiler.h> +#include <zircon/thread_annotations.h> DECLARE_SINGLETON_MUTEX(TheHeapLock); -void* cmpct_alloc(size_t) __TA_EXCLUDES(TheHeapLock::Get()); -void* cmpct_realloc(void*, size_t) __TA_EXCLUDES(TheHeapLock::Get()); -void cmpct_free(void*) __TA_EXCLUDES(TheHeapLock::Get()); -void* cmpct_memalign(size_t size, size_t alignment) __TA_EXCLUDES(TheHeapLock::Get()); +void* cmpct_alloc(size_t) TA_EXCL(TheHeapLock::Get()); +void* cmpct_realloc(void*, size_t) TA_EXCL(TheHeapLock::Get()); +void cmpct_free(void*) TA_EXCL(TheHeapLock::Get()); +void* cmpct_memalign(size_t size, size_t alignment) TA_EXCL(TheHeapLock::Get()); -void cmpct_init(void) __TA_EXCLUDES(TheHeapLock::Get()); -void cmpct_dump(bool panic_time) __TA_EXCLUDES(TheHeapLock::Get()); -void cmpct_get_info(size_t* size_bytes, size_t* free_bytes) __TA_EXCLUDES(TheHeapLock::Get()); -void 
cmpct_test(void) __TA_EXCLUDES(TheHeapLock::Get()); -void cmpct_trim(void) __TA_EXCLUDES(TheHeapLock::Get()); +void cmpct_init(void) TA_EXCL(TheHeapLock::Get()); +void cmpct_dump(bool panic_time) TA_EXCL(TheHeapLock::Get()); +void cmpct_get_info(size_t* size_bytes, size_t* free_bytes) TA_EXCL(TheHeapLock::Get()); +void cmpct_test(void) TA_EXCL(TheHeapLock::Get()); +void cmpct_trim(void) TA_EXCL(TheHeapLock::Get()); diff --git a/zircon/kernel/tests/lock_dep_tests.cpp b/zircon/kernel/tests/lock_dep_tests.cpp index 3ca3e34056027d25df16b7a6fde922fa647765ba..bdc47acf2a1df583dc6deaaf958bfca6e02956fa 100644 --- a/zircon/kernel/tests/lock_dep_tests.cpp +++ b/zircon/kernel/tests/lock_dep_tests.cpp @@ -11,6 +11,7 @@ #include <lockdep/guard_multiple.h> #include <lockdep/lockdep.h> #include <stdint.h> +#include <zircon/thread_annotations.h> #if WITH_LOCK_DEP_TESTS @@ -23,22 +24,22 @@ bool g_try_lock_succeeds = true; struct Spinlock : Mutex { using Mutex::Mutex; - bool AcquireIrqSave(uint64_t* flags) __TA_ACQUIRE() { + bool AcquireIrqSave(uint64_t* flags) TA_ACQ() { (void)flags; Acquire(); return true; } - void ReleaseIrqRestore(uint64_t flags) __TA_RELEASE() { + void ReleaseIrqRestore(uint64_t flags) TA_REL() { (void)flags; Release(); } - bool TryAcquire() __TA_TRY_ACQUIRE(true) { + bool TryAcquire() TA_TRY_ACQ(true) { if (g_try_lock_succeeds) Acquire(); return g_try_lock_succeeds; } - bool TryAcquireIrqSave(uint64_t* flags) __TA_TRY_ACQUIRE(true) { + bool TryAcquireIrqSave(uint64_t* flags) TA_TRY_ACQ(true) { (void)flags; if (g_try_lock_succeeds) Acquire(); @@ -71,11 +72,11 @@ struct TryNoIrqSave {}; struct SpinlockNoIrqSave { struct State {}; - static bool Acquire(Spinlock* lock, State*) __TA_ACQUIRE(lock) { + static bool Acquire(Spinlock* lock, State*) TA_ACQ(lock) { lock->Acquire(); return true; } - static void Release(Spinlock* lock, State*) __TA_RELEASE(lock) { + static void Release(Spinlock* lock, State*) TA_REL(lock) { lock->Release(); } }; @@ 
-87,11 +88,11 @@ struct SpinlockIrqSave { uint64_t flags; }; - static bool Acquire(Spinlock* lock, State* state) __TA_ACQUIRE(lock) { + static bool Acquire(Spinlock* lock, State* state) TA_ACQ(lock) { lock->AcquireIrqSave(&state->flags); return true; } - static void Release(Spinlock* lock, State* state) __TA_RELEASE(lock) { + static void Release(Spinlock* lock, State* state) TA_REL(lock) { lock->ReleaseIrqRestore(state->flags); } }; @@ -100,10 +101,10 @@ LOCK_DEP_POLICY_OPTION(Spinlock, IrqSave, SpinlockIrqSave); struct SpinlockTryNoIrqSave { struct State {}; - static bool Acquire(Spinlock* lock, State*) __TA_TRY_ACQUIRE(true, lock) { + static bool Acquire(Spinlock* lock, State*) TA_TRY_ACQ(true, lock) { return lock->TryAcquire(); } - static void Release(Spinlock* lock, State*) __TA_RELEASE(lock) { + static void Release(Spinlock* lock, State*) TA_REL(lock) { lock->Release(); } }; @@ -115,10 +116,10 @@ struct SpinlockTryIrqSave { uint64_t flags; }; - static bool Acquire(Spinlock* lock, State* state) __TA_TRY_ACQUIRE(true, lock) { + static bool Acquire(Spinlock* lock, State* state) TA_TRY_ACQ(true, lock) { return lock->TryAcquireIrqSave(&state->flags); } - static void Release(Spinlock* lock, State* state) __TA_RELEASE(lock) { + static void Release(Spinlock* lock, State* state) TA_REL(lock) { lock->ReleaseIrqRestore(state->flags); } }; @@ -192,32 +193,32 @@ struct Nestable : ::Mutex { }; LOCK_DEP_TRAITS(Nestable, lockdep::LockFlagsNestable); -struct __TA_CAPABILITY("mutex") ReadWriteLock { - bool AcquireWrite() __TA_ACQUIRE() { +struct TA_CAP("mutex") ReadWriteLock { + bool AcquireWrite() TA_ACQ() { return true; } - bool AcquireRead() __TA_ACQUIRE_SHARED() { + bool AcquireRead() TA_ACQ_SHARED() { return true; } - void Release() __TA_RELEASE() {} + void Release() TA_REL() {} struct Read { struct State {}; struct Shared {}; - static bool Acquire(ReadWriteLock* lock, State*) __TA_ACQUIRE_SHARED(lock) { + static bool 
Acquire(ReadWriteLock* lock, State*) TA_ACQ_SHARED(lock) { return lock->AcquireRead(); } - static void Release(ReadWriteLock* lock, State*) __TA_RELEASE(lock) { + static void Release(ReadWriteLock* lock, State*) TA_REL(lock) { lock->Release(); } }; struct Write { struct State {}; - static bool Acquire(ReadWriteLock* lock, State*) __TA_ACQUIRE(lock) { + static bool Acquire(ReadWriteLock* lock, State*) TA_ACQ(lock) { return lock->AcquireWrite(); } - static void Release(ReadWriteLock* lock, State*) __TA_RELEASE(lock) { + static void Release(ReadWriteLock* lock, State*) TA_REL(lock) { lock->Release(); } }; @@ -228,42 +229,42 @@ LOCK_DEP_POLICY_OPTION(ReadWriteLock, ReadWriteLock::Write, ReadWriteLock::Write struct Foo { LOCK_DEP_INSTRUMENT(Foo, Mutex) lock; - void TestRequire() __TA_REQUIRES(lock) {} - void TestExclude() __TA_EXCLUDES(lock) {} + void TestRequire() TA_REQ(lock) {} + void TestExclude() TA_EXCL(lock) {} }; struct Bar { LOCK_DEP_INSTRUMENT(Bar, Mutex) lock; - void TestRequire() __TA_REQUIRES(lock) {} - void TestExclude() __TA_EXCLUDES(lock) {} + void TestRequire() TA_REQ(lock) {} + void TestExclude() TA_EXCL(lock) {} }; template <typename LockType> struct Baz { LOCK_DEP_INSTRUMENT(Baz, LockType) lock; - void TestRequire() __TA_REQUIRES(lock) {} - void TestExclude() __TA_EXCLUDES(lock) {} - void TestShared() __TA_REQUIRES_SHARED(lock) {} + void TestRequire() TA_REQ(lock) {} + void TestExclude() TA_EXCL(lock) {} + void TestShared() TA_REQ_SHARED(lock) {} }; struct MultipleLocks { LOCK_DEP_INSTRUMENT(MultipleLocks, Mutex) lock_a; LOCK_DEP_INSTRUMENT(MultipleLocks, Mutex) lock_b; - void TestRequireLockA() __TA_REQUIRES(lock_a) {} - void TestExcludeLockA() __TA_EXCLUDES(lock_a) {} - void TestRequireLockB() __TA_REQUIRES(lock_b) {} - void TestExcludeLockB() __TA_EXCLUDES(lock_b) {} + void TestRequireLockA() TA_REQ(lock_a) {} + void TestExcludeLockA() TA_EXCL(lock_a) {} + void TestRequireLockB() 
TA_REQ(lock_b) {} + void TestExcludeLockB() TA_EXCL(lock_b) {} }; template <size_t Index> struct Number { LOCK_DEP_INSTRUMENT(Number, Mutex) lock; - void TestRequire() __TA_REQUIRES(lock) {} - void TestExclude() __TA_EXCLUDES(lock) {} + void TestRequire() TA_REQ(lock) {} + void TestExclude() TA_EXCL(lock) {} }; lockdep::LockResult GetLastResult() { diff --git a/zircon/system/public/zircon/compiler.h b/zircon/system/public/zircon/compiler.h index f9daaabd6936d368cdf1b52cf6c79ec810db7518..7b9dac3f6ee3c85024d75635c8509fc7fb559f4e 100644 --- a/zircon/system/public/zircon/compiler.h +++ b/zircon/system/public/zircon/compiler.h @@ -40,13 +40,20 @@ #define __LEAF_FN __attribute__((__leaf__)) #define __OPTIMIZE(x) __attribute__((__optimize__(x))) #define __EXTERNALLY_VISIBLE __attribute__((__externally_visible__)) -#define __THREAD_ANNOTATION(x) #define __NO_SAFESTACK +#define __THREAD_ANNOTATION(x) #else #define __LEAF_FN #define __OPTIMIZE(x) #define __EXTERNALLY_VISIBLE +// The thread safety annotations are frequently used with C++ standard library +// types in userspace, so only enable the annotations if we know that the C++ +// standard library types are annotated or if we're in kernel code. +#if defined(_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS) || defined(_KERNEL) #define __THREAD_ANNOTATION(x) __attribute__((x)) +#else +#define __THREAD_ANNOTATION(x) +#endif // _LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS #define __NO_SAFESTACK __attribute__((__no_sanitize__("safe-stack"))) #endif