Added more mutexes in eventmanager, fixed destruction bug on exit

checksum-failure-logging
EricLiu2000 2019-08-07 13:12:16 -04:00
parent 9629864f1b
commit ef16e60025
5 changed files with 43 additions and 17 deletions
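
The destruction bug on exit comes from the old unique_ptr-based singleton: its destruction order relative to other static objects is unspecified, so an event reported during program teardown could reach an already-destroyed EventManager. The first hunk below replaces it with a function-local static plus a destructing flag that short-circuits work once teardown has begun. A minimal, self-contained sketch of that pattern (Manager, report, and main are illustrative names only, not icsneo code):

#include <mutex>

class Manager {
public:
    static Manager& GetInstance() {
        // Function-local static: constructed on first use, initialization is
        // thread-safe since C++11, destroyed automatically at program exit.
        static Manager inst;
        return inst;
    }
    void report(int value) {
        if(destructing) // refuse new work once teardown has started
            return;
        std::lock_guard<std::mutex> lk(mutex);
        last = value;
    }
    ~Manager() { destructing = true; }
private:
    Manager() = default;
    std::mutex mutex;
    int last = 0;
    bool destructing = false;
};

int main() {
    Manager::GetInstance().report(42);
}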

View File

@@ -3,20 +3,32 @@
using namespace icsneo;
static std::unique_ptr<EventManager> singleton;
EventManager& EventManager::GetInstance() {
if(!singleton)
singleton = std::unique_ptr<EventManager>(new EventManager());
return *singleton.get();
static EventManager inst;
return inst;
}
void EventManager::ResetInstance() {
singleton = std::unique_ptr<EventManager>(new EventManager());
std::lock_guard<std::mutex> eventsLock(eventsMutex);
std::lock_guard<std::mutex> errorsLock(errorsMutex);
std::lock_guard<std::mutex> downgradedThreadsLock(downgradedThreadsMutex);
std::lock_guard<std::mutex> callbacksLock(callbacksMutex);
std::lock_guard<std::mutex> callbackIDLock(callbackIDMutex);
std::lock_guard<std::mutex> eventLimitLock(eventLimitMutex);
events.clear();
lastUserErrors.clear();
downgradedThreads.clear();
callbacks.clear();
callbackID = 0;
eventLimit = 10000;
}
int EventManager::addEventCallback(const EventCallback &cb) {
std::lock_guard<std::mutex> lk(callbacksMutex);
std::lock_guard<std::mutex> callbacksLock(callbacksMutex);
std::lock_guard<std::mutex> callbackIDLock(callbackIDMutex);
callbacks.insert({callbackID, cb});
return callbackID++;
}
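
ResetInstance() and addEventCallback() above each take several lock_guards in sequence. Both acquire callbacksMutex before callbackIDMutex, so the acquisition order is consistent and cannot deadlock. Where C++17 is available, the same effect can be written with std::scoped_lock, which locks a set of mutexes with built-in deadlock avoidance; a sketch of that alternative (resetCallbacks is a hypothetical name, and this is not part of the change itself):

#include <mutex>

std::mutex callbacksMutex;
std::mutex callbackIDMutex;

void resetCallbacks() {
    // scoped_lock uses the std::lock deadlock-avoidance algorithm, so callers
    // do not need to agree on a global acquisition order for these mutexes.
    std::scoped_lock lk(callbacksMutex, callbackIDMutex);
    // ... clear the state guarded by both mutexes ...
}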

View File

@@ -1,5 +1,6 @@
#include "icsneo/device/device.h"
#include "icsneo/communication/message/callback/messagecallback.h"
#include "icsneo/api/eventmanager.h"
#include "icsneo/communication/command.h"
#include <string.h>
#include <iostream>

View File

@@ -17,13 +17,19 @@ typedef std::function<void (APIEvent::Type, APIEvent::Severity)> device_eventhandler_t
class EventManager {
public:
~EventManager() {
destructing = true;
}
static EventManager& GetInstance();
static void ResetInstance();
void ResetInstance();
// If this thread is not in the map, add it to be ignored
// If it is, set it to be ignored
void downgradeErrorsOnCurrentThread() {
if(destructing)
return;
std::lock_guard<std::mutex> lk(downgradedThreadsMutex);
auto i = downgradedThreads.find(std::this_thread::get_id());
if(i != downgradedThreads.end()) {
@@ -35,6 +41,8 @@ public:
// If this thread exists in the map, turn off downgrading
void cancelErrorDowngradingOnCurrentThread() {
if(destructing)
return;
std::lock_guard<std::mutex> lk(downgradedThreadsMutex);
auto i = downgradedThreads.find(std::this_thread::get_id());
if(i != downgradedThreads.end()) {
@@ -63,6 +71,8 @@ public:
APIEvent getLastError();
void add(APIEvent event) {
if(destructing)
return;
if(event.getSeverity() == APIEvent::Severity::Error) {
// if the error was added on a thread that downgrades errors (non-user thread)
std::lock_guard<std::mutex> lk(downgradedThreadsMutex);
@@ -93,7 +103,7 @@ public:
void discard(EventFilter filter = EventFilter());
void setEventLimit(size_t newLimit) {
std::lock_guard<std::mutex> lk(eventsMutex);
std::lock_guard<std::mutex> eventLimitLock(eventLimitMutex);
if(newLimit == eventLimit)
return;
@@ -104,17 +114,19 @@
}
eventLimit = newLimit;
std::lock_guard<std::mutex> eventsLock(eventsMutex);
if(enforceLimit())
add_internal_event(APIEvent(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning));
}
size_t getEventLimit() const {
std::lock_guard<std::mutex> lk(eventsMutex);
std::lock_guard<std::mutex> lk(eventLimitMutex);
return eventLimit;
}
private:
EventManager() : eventsMutex(), errorsMutex(), downgradedThreadsMutex(), callbacksMutex(), downgradedThreads(), callbacks(), events(), lastUserErrors(), eventLimit(10000) {}
EventManager() : eventsMutex(), errorsMutex(), downgradedThreadsMutex(), callbacksMutex(), callbackIDMutex(), eventLimitMutex(), downgradedThreads(), callbacks(), events(), lastUserErrors(), eventLimit(10000) {}
EventManager(const EventManager &other);
EventManager& operator=(const EventManager &other);
@@ -124,12 +136,17 @@ private:
mutable std::mutex downgradedThreadsMutex;
mutable std::mutex callbacksMutex;
mutable std::mutex callbackIDMutex;
mutable std::mutex eventLimitMutex;
std::map<std::thread::id, bool> downgradedThreads;
std::map<int, EventCallback> callbacks;
int callbackID = 0;
bool destructing = false;
// Stores all events
std::list<APIEvent> events;
std::map<std::thread::id, APIEvent> lastUserErrors;
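
The downgradedThreads map above drives the behaviour described in the member comments: a worker thread can mark itself so that errors it raises are recorded as downgraded rather than surfaced as user errors, and add() consults the map under downgradedThreadsMutex using std::this_thread::get_id(). A stripped-down sketch of that mechanism with placeholder names (not the library's implementation):

#include <map>
#include <mutex>
#include <thread>

class Downgrader {
public:
    // Mark the calling thread so errors it raises are treated as downgraded.
    void downgradeErrorsOnCurrentThread() {
        std::lock_guard<std::mutex> lk(mutex);
        downgraded[std::this_thread::get_id()] = true;
    }
    // Turn downgrading back off for the calling thread, if it was ever marked.
    void cancelErrorDowngradingOnCurrentThread() {
        std::lock_guard<std::mutex> lk(mutex);
        auto it = downgraded.find(std::this_thread::get_id());
        if(it != downgraded.end())
            it->second = false;
    }
    // Should an error raised on the calling thread be downgraded?
    bool shouldDowngrade() {
        std::lock_guard<std::mutex> lk(mutex);
        auto it = downgraded.find(std::this_thread::get_id());
        return it != downgraded.end() && it->second;
    }
private:
    std::mutex mutex;
    std::map<std::thread::id, bool> downgraded;
};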

View File

@@ -22,7 +22,6 @@ namespace icsneo {
class Device {
public:
virtual ~Device() {
destructing = true;
if(isMessagePollingEnabled())
disableMessagePolling();
close();
@@ -121,7 +120,6 @@ protected:
virtual device_eventhandler_t makeEventHandler() {
return [this](APIEvent::Type type, APIEvent::Severity severity) {
if(!destructing)
EventManager::GetInstance().add(type, severity, this);
};
}
@@ -178,8 +176,6 @@ private:
size_t pollingMessageLimit = 20000;
moodycamel::BlockingConcurrentQueue<std::shared_ptr<Message>> pollingContainer;
void enforcePollingMessageLimit();
bool destructing = false;
};
}

View File

@@ -11,7 +11,7 @@ class EventManagerTest : public ::testing::Test {
protected:
// Start with a clean instance of eventmanager for every test
void SetUp() override {
EventManager::ResetInstance();
EventManager::GetInstance().ResetInstance();
}
};
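
With ResetInstance() now an instance method, every test begins from a freshly cleared EventManager. A hypothetical test placed in the same source file as the fixture above might look like the following (the test name is illustrative; the 10000 default comes from the header in this commit):

TEST_F(EventManagerTest, StartsWithDefaultEventLimit) {
    // SetUp() called ResetInstance(), which restores eventLimit to 10000.
    EXPECT_EQ(EventManager::GetInstance().getEventLimit(), 10000u);
}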