// *****************************************************************************
// * This file is part of the FreeFileSync project. It is distributed under    *
// * GNU General Public License: https://www.gnu.org/licenses/gpl-3.0          *
// * Copyright (C) Zenju (zenju AT freefilesync DOT org) - All Rights Reserved *
// *****************************************************************************

#ifndef GLOBALS_H_8013740213748021573485
#define GLOBALS_H_8013740213748021573485

#include <atomic>
#include <memory>
#include "scope_guard.h"
#include "legacy_compiler.h"


namespace zen
{
/* Solve the static destruction order fiasco by providing shared ownership and serialized access to global variables

    => there may be accesses to "Global::get()" during process shutdown, e.g. _("") used by a message in debug_minidump.cpp or by some detached thread assembling an error message!
    => use trivially-destructible POD only!!!

    ATTENTION: for function-static globals the compiler generates "magic statics" == compiler-generated locking code which will crash or leak memory when accessed after the global is "dead"
               => "solved" by FunStatGlobal, but we can't have "too many" of these...
*/
class PodSpinMutex
{
public:
    bool tryLock();
    void lock();
    void unlock();
    bool isLocked();

private:
    std::atomic_flag flag_{}; /* => avoid potential contention with worker thread during Global<> construction!
    - "For an atomic_flag with static storage duration, this guarantees static initialization" => just what the doctor ordered!
    - "[default initialization] initializes std::atomic_flag to clear state" - since C++20
    - "std::atomic_flag is [...] guaranteed to be lock-free"
    - interestingly, is_trivially_constructible_v<> is false, thanks to constexpr! https://developercommunity.visualstudio.com/content/problem/416343/stdatomic-no-longer-is-trivially-constructible.html */
};


#define GLOBAL_RUN_ONCE(X)                               \
    struct ZEN_CONCAT(GlobalInitializer, __LINE__)       \
    {                                                    \
        ZEN_CONCAT(GlobalInitializer, __LINE__)() { X; } \
    } ZEN_CONCAT(globalInitializer, __LINE__)


template <class T>
class Global //don't use for function-scope statics!
{
public:
    consteval2 Global() {}; //demand static zero-initialization!

    ~Global()
    {
        static_assert(std::is_trivially_destructible_v<Pod>, "this memory needs to live forever");
        set(nullptr);
    }

    std::shared_ptr<T> get() //=> return std::shared_ptr to let instance life time be handled by caller (MT usage!)
    {
        pod_.spinLock.lock();
        ZEN_ON_SCOPE_EXIT(pod_.spinLock.unlock());

        if (pod_.inst)
            return *pod_.inst;
        return nullptr;
    }

    void set(std::unique_ptr<T>&& newInst)
    {
        std::shared_ptr<T>* tmpInst = nullptr;
        if (newInst)
            tmpInst = new std::shared_ptr<T>(std::move(newInst));
        {
            pod_.spinLock.lock();
            ZEN_ON_SCOPE_EXIT(pod_.spinLock.unlock());

            std::swap(pod_.inst, tmpInst);
        }
        delete tmpInst; //delete the old instance (if any) outside the lock
    }

private:
    struct Pod
    {
        PodSpinMutex spinLock; //rely entirely on static zero-initialization! => avoid potential contention with worker thread during Global<> construction!
        //serialize access: can't use std::mutex: it has a non-trivial destructor
        std::shared_ptr<T>* inst = nullptr;
    } pod_;
};
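
/* Usage sketch (illustrative only, not part of the original header; "Logger" is a hypothetical type):

    Global<Logger> globalLogger; //namespace scope => constant-initialized; ~Global() merely resets the pointer

    void startup()  { globalLogger.set(std::make_unique<Logger>()); }
    void shutdown() { globalLogger.set(nullptr); }

    void log(const std::string& msg)
    {
        if (std::shared_ptr<Logger> logger = globalLogger.get()) //shared_ptr => instance stays alive
            logger->write(msg);                                  //even if another thread set()s concurrently
    }
*/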

//===================================================================================================================
//===================================================================================================================

struct CleanUpEntry
{
    using CleanUpFunction = void (*)(void* callbackData);
    CleanUpFunction cleanUpFun   = nullptr;
    void*           callbackData = nullptr;
    CleanUpEntry*   prev         = nullptr;
};
void registerGlobalForDestruction(CleanUpEntry& entry);


template <class T>
class FunStatGlobal
{
public:
    consteval2 FunStatGlobal() {}; //demand static zero-initialization!

    //no ~FunStatGlobal()!

    void initOnce(std::unique_ptr<T> (*getInitialValue)())
    {
        static_assert(std::is_trivially_destructible_v<Pod>, "this class must not generate code for magic statics!");

        pod_.spinLock.lock();
        ZEN_ON_SCOPE_EXIT(pod_.spinLock.unlock());

        if (!pod_.cleanUpEntry.cleanUpFun)
        {
            assert(!pod_.inst);
            if (std::unique_ptr<T> newInst = (*getInitialValue)())
                pod_.inst = new std::shared_ptr<T>(std::move(newInst));
            registerDestruction();
        }
    }

    std::shared_ptr<T> get()
    {
        pod_.spinLock.lock();
        ZEN_ON_SCOPE_EXIT(pod_.spinLock.unlock());

        if (pod_.inst)
            return *pod_.inst;
        return nullptr;
    }

    void set(std::unique_ptr<T>&& newInst)
    {
        std::shared_ptr<T>* tmpInst = nullptr;
        if (newInst)
            tmpInst = new std::shared_ptr<T>(std::move(newInst));
        {
            pod_.spinLock.lock();
            ZEN_ON_SCOPE_EXIT(pod_.spinLock.unlock());

            std::swap(pod_.inst, tmpInst);
            registerDestruction();
        }
        delete tmpInst;
    }

private:
    //call while holding pod_.spinLock
    void registerDestruction()
    {
        assert(pod_.spinLock.isLocked());

        if (!pod_.cleanUpEntry.cleanUpFun)
        {
            pod_.cleanUpEntry.callbackData = this;
            pod_.cleanUpEntry.cleanUpFun = [](void* callbackData)
            {
                auto thisPtr = static_cast<FunStatGlobal*>(callbackData);
                thisPtr->set(nullptr);
            };
            registerGlobalForDestruction(pod_.cleanUpEntry);
        }
    }

    struct Pod
    {
        PodSpinMutex spinLock; //rely entirely on static zero-initialization! => avoid potential contention with worker thread during Global<> construction!
        //serialize access: can't use std::mutex: it has a non-trivial destructor
        std::shared_ptr<T>* inst = nullptr;
        CleanUpEntry cleanUpEntry;
    } pod_;
};


inline
void registerGlobalForDestruction(CleanUpEntry& entry)
{
    static struct
    {
        PodSpinMutex  spinLock;
        CleanUpEntry* head;
    } cleanUpList; //trivially-destructible POD => constant-initialized, no destructor

    static_assert(std::is_trivially_destructible_v<decltype(cleanUpList)>, "we must not generate code for magic statics!");

    cleanUpList.spinLock.lock();
    ZEN_ON_SCOPE_EXIT(cleanUpList.spinLock.unlock());

    //one std::atexit() call per registration => each callback pops exactly one list entry
    std::atexit([]
    {
        cleanUpList.spinLock.lock();
        ZEN_ON_SCOPE_EXIT(cleanUpList.spinLock.unlock());

        (*cleanUpList.head->cleanUpFun)(cleanUpList.head->callbackData);
        cleanUpList.head = cleanUpList.head->prev; //nicely clean up in reverse order of construction
    });

    entry.prev = cleanUpList.head;
    cleanUpList.head = &entry;
}

//------------------------------------------------------------------------------------------
#ifdef __cpp_lib_atomic_wait
    #error implement + review improvements
#endif


inline
bool PodSpinMutex::tryLock()
{
    return !flag_.test_and_set(std::memory_order_acquire);
}


inline
void PodSpinMutex::lock()
{
    while (!tryLock())
#ifdef __cpp_lib_atomic_wait
        flag_.wait(true, std::memory_order_relaxed);
#else
        ; //busy-wait until the flag is cleared
#endif
}


inline
void PodSpinMutex::unlock()
{
    flag_.clear(std::memory_order_release);
#ifdef __cpp_lib_atomic_wait
    flag_.notify_one();
#endif
}


inline
bool PodSpinMutex::isLocked()
{
    if (!tryLock())
        return true;
    unlock();
    return false;
}
}

#endif //GLOBALS_H_8013740213748021573485
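
/* FunStatGlobal usage sketch (illustrative only, not part of the original header;
   "FormatCache" and getFormatCache() are hypothetical):

    std::shared_ptr<FormatCache> getFormatCache()
    {
        //consteval2 ctor => constant-initialized; trivially-destructible Pod => no "magic statics" guard code
        static FunStatGlobal<FormatCache> cache;
        cache.initOnce([] { return std::make_unique<FormatCache>(); }); //capture-less lambda converts to function pointer
        return cache.get(); //caller shares ownership => result stays valid even during process shutdown
    }

   The first initOnce() registers cleanup via std::atexit(), so the instance is torn down
   in reverse order of construction rather than by a (dangerous) static destructor.
*/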