author    B Stack <bgstack15@gmail.com>  2018-10-17 02:11:26 +0000
committer B Stack <bgstack15@gmail.com>  2018-10-17 02:11:26 +0000
commit    f70f8f961ef8f4d909266f71310e3515f25928e6 (patch)
tree      89b2a018482c164bdd8ecac5c76b19a08f420dec /zen/globals.h
parent    Merge branch '10.4' into 'master' (diff)
parent    10.5 (diff)
Merge branch '10.5' into 'master'

10.5

See merge request opensource-tracking/FreeFileSync!2
Diffstat (limited to 'zen/globals.h')
-rwxr-xr-x  zen/globals.h  135
1 file changed, 129 insertions(+), 6 deletions(-)
diff --git a/zen/globals.h b/zen/globals.h
index 10975414..024147fa 100755
--- a/zen/globals.h
+++ b/zen/globals.h
@@ -14,14 +14,22 @@
namespace zen
{
-//solve static destruction order fiasco by providing shared ownership and serialized access to global variables
+/*
+Solve static destruction order fiasco by providing shared ownership and serialized access to global variables
+
+=> there may be accesses to "Global<T>::get()" during process shutdown, e.g. _("") used by message in debug_minidump.cpp or by some detached thread assembling an error message!
+=> use trivially-destructible POD only!!!
+
+ATTENTION: for function-static globals the compiler generates "magic statics" == compiler-generated locking code which will crash or leak memory when accessed after the global is "dead"
+   => "solved" by FunStatGlobal, but we can't have "too many" of these...
+*/
template <class T>
-class Global
+class Global //don't use for function-scope statics!
{
public:
Global()
{
- static_assert(std::is_trivially_destructible_v<Pod>, "this memory needs to live forever");
+ static_assert(std::is_trivially_constructible_v<Pod> && std::is_trivially_destructible_v<Pod>, "this memory needs to live forever");
assert(!pod_.inst && !pod_.spinLock); //we depend on static zero-initialization!
}
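
The strengthened assertion guards the invariant both classes below rely on: a Pod in static storage is zero-initialized before any code runs and is deliberately never destroyed, so its type must generate no construction or destruction code at all. A standalone illustration of what such an assert accepts and rejects (types invented for this sketch; requires <atomic> and <type_traits>):

    struct GoodPod { std::atomic<bool> lock; int* ptr; }; //trivial in C++17: zero-init only, no dtor code
    struct BadPod  { ~BadPod() {} };                      //user-provided dtor => not trivially destructible

    static_assert( std::is_trivially_destructible_v<GoodPod>);
    static_assert(!std::is_trivially_destructible_v<BadPod>);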
@@ -52,16 +60,131 @@ public:
}
private:
- //avoid static destruction order fiasco: there may be accesses to "Global<T>::get()" during process shutdown
- //e.g. _("") used by message in debug_minidump.cpp or by some detached thread assembling an error message!
- //=> use trivially-destructible POD only!!!
struct Pod
{
+ std::atomic<bool> spinLock; // { false }; rely entirely on static zero-initialization! => avoid potential contention with worker thread during Global<> construction!
+ //serialize access; can't use std::mutex: has non-trivial destructor
std::shared_ptr<T>* inst; // = nullptr;
+ } pod_;
+};
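
A hypothetical usage sketch (type and function names invented here; assumes Global<> keeps its get()/set() accessors from the part of the class body the hunk elides):

    struct Logger { /*...*/ };

    Global<Logger> globalLogger; //namespace scope: static zero-initialization, no destructor code emitted

    void logStatus()
    {
        globalLogger.set(std::make_unique<Logger>()); //publish shared instance
        if (std::shared_ptr<Logger> log = globalLogger.get()) //may be null during shutdown!
        { /* use *log */ }
    }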
+
+//===================================================================================================================
+//===================================================================================================================
+
+struct CleanUpEntry
+{
+ using CleanUpFunction = void (*)(void* callbackData);
+ CleanUpFunction cleanUpFun;
+ void* callbackData;
+ CleanUpEntry* prev;
+};
+void registerGlobalForDestruction(CleanUpEntry& entry);
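
CleanUpEntry chains form an intrusive, singly-linked stack through the prev pointer. A standalone sketch of the push and tear-down mechanics (names invented; not part of the header):

    void demoCleanUpChain()
    {
        CleanUpEntry* head = nullptr;

        CleanUpEntry a{[](void*) { /* clean up A */ }, nullptr, nullptr};
        CleanUpEntry b{[](void*) { /* clean up B */ }, nullptr, nullptr};

        a.prev = head; head = &a; //push A
        b.prev = head; head = &b; //push B

        for (CleanUpEntry* it = head; it; it = it->prev) //walk: B first, then A
            (*it->cleanUpFun)(it->callbackData); //reverse order of construction
    }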
+
+
+template <class T>
+class FunStatGlobal
+{
+public:
+ //No FunStatGlobal() or ~FunStatGlobal()!
+
+ std::shared_ptr<T> get()
+ {
+ static_assert(std::is_trivially_constructible_v<FunStatGlobal> &&
+               std::is_trivially_destructible_v<FunStatGlobal>, "this class must not generate code for magic statics!");
+
+ while (pod_.spinLock.exchange(true)) ; //busy-wait until we own the spin lock
+ ZEN_ON_SCOPE_EXIT(pod_.spinLock = false);
+ if (pod_.inst)
+ return *pod_.inst;
+ return nullptr;
+ }
+
+ void set(std::unique_ptr<T>&& newInst)
+ {
+ std::shared_ptr<T>* tmpInst = nullptr;
+ if (newInst)
+ tmpInst = new std::shared_ptr<T>(std::move(newInst));
+ {
+ while (pod_.spinLock.exchange(true)) ;
+ ZEN_ON_SCOPE_EXIT(pod_.spinLock = false);
+
+ std::swap(pod_.inst, tmpInst);
+ registerDestruction();
+ }
+ delete tmpInst; //destroy the old instance *outside* the spin lock => keep the critical section minimal
+ }
+
+ void initOnce(std::unique_ptr<T> (*getInitialValue)())
+ {
+ while (pod_.spinLock.exchange(true)) ;
+ ZEN_ON_SCOPE_EXIT(pod_.spinLock = false);
+
+ if (!pod_.cleanUpEntry.cleanUpFun)
+ {
+ assert(!pod_.inst);
+ if (std::unique_ptr<T> newInst = (*getInitialValue)())
+ pod_.inst = new std::shared_ptr<T>(std::move(newInst));
+ registerDestruction();
+ }
+ }
+
+private:
+ //call while holding pod_.spinLock
+ void registerDestruction()
+ {
+ assert(pod_.spinLock);
+
+ if (!pod_.cleanUpEntry.cleanUpFun)
+ {
+ pod_.cleanUpEntry.callbackData = this;
+ pod_.cleanUpEntry.cleanUpFun = [](void* callbackData)
+ {
+ auto thisPtr = static_cast<FunStatGlobal*>(callbackData);
+ thisPtr->set(nullptr);
+ };
+
+ registerGlobalForDestruction(pod_.cleanUpEntry);
+ }
+ }
+
+ struct Pod
+ {
std::atomic<bool> spinLock; // { false }; rely entirely on static zero-initialization! => avoid potential contention with worker thread during Global<> construction!
//serialize access; can't use std::mutex: has non-trivial destructor
+ std::shared_ptr<T>* inst; // = nullptr;
+ CleanUpEntry cleanUpEntry;
} pod_;
};
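
A sketch of the intended use (accessor and payload type invented for illustration): FunStatGlobal is declared as a plain function-scope static, so it is zero-initialized at load time and the compiler emits no magic-statics locking code:

    struct TraceLog { /*...*/ };

    std::shared_ptr<TraceLog> getTraceLog() //hypothetical accessor
    {
        static FunStatGlobal<TraceLog> traceLog; //trivial type => no magic statics generated

        traceLog.initOnce([] { return std::make_unique<TraceLog>(); });
        return traceLog.get(); //may legitimately return nullptr during shutdown
    }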
+
+
+inline
+void registerGlobalForDestruction(CleanUpEntry& entry)
+{
+ static struct
+ {
+ std::atomic<bool> spinLock;
+ CleanUpEntry* head;
+ } cleanUpList;
+
+ static_assert(std::is_trivially_constructible_v<decltype(cleanUpList)> &&
+               std::is_trivially_destructible_v<decltype(cleanUpList)>, "we must not generate code for magic statics!");
+
+ while (cleanUpList.spinLock.exchange(true)) ;
+ ZEN_ON_SCOPE_EXIT(cleanUpList.spinLock = false);
+
+ std::atexit([]
+ {
+ while (cleanUpList.spinLock.exchange(true)) ;
+ ZEN_ON_SCOPE_EXIT(cleanUpList.spinLock = false);
+
+ (*cleanUpList.head->cleanUpFun)(cleanUpList.head->callbackData);
+ cleanUpList.head = cleanUpList.head->prev; //nicely clean up in reverse order of construction
+ });
+
+ entry.prev = cleanUpList.head;
+ cleanUpList.head = &entry;
+
+}
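
Note the pairing this relies on: every registration enqueues one std::atexit handler, and every handler pops exactly one list head. Because the C runtime runs atexit handlers in reverse order of registration, each handler finds "its" entry on top of the stack, so globals are destroyed in reverse order of construction. A minimal standalone demonstration of that ordering guarantee:

    #include <cstdio>
    #include <cstdlib>

    int main()
    {
        std::atexit([] { std::puts("destroy A"); }); //registered first => runs last
        std::atexit([] { std::puts("destroy B"); }); //registered last  => runs first
        //prints "destroy B", then "destroy A" on exit
    }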
}
#endif //GLOBALS_H_8013740213748021573485