Fixed multithreading issue with eventmanager
parent ec95f0f7c3
commit a9a2a978b8
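The gist of the change: EventManager's plain std::mutex becomes a std::shared_mutex, const readers (eventCount, getEventLimit) take a std::shared_lock, and everything that mutates state (add, get, getLastError, discard, setEventLimit) takes a std::unique_lock, so readers no longer serialize against each other. A minimal standalone sketch of that reader/writer pattern, using illustrative names rather than the library's own:

#include <shared_mutex>
#include <mutex>
#include <vector>

// Illustrative reader/writer locking sketch, not libicsneo code.
class Counter {
public:
	void add(int v) {
		std::unique_lock<std::shared_mutex> lk(mtx); // exclusive: writers block readers and writers
		values.push_back(v);
	}
	size_t count() const {
		std::shared_lock<std::shared_mutex> lk(mtx); // shared: concurrent readers are allowed
		return values.size();
	}
private:
	mutable std::shared_mutex mtx; // mutable so const readers can still lock
	std::vector<int> values;
};

The mutex has to be mutable so that const readers can lock it, which is exactly what the header change further down does.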
@@ -12,11 +12,11 @@ EventManager& EventManager::GetInstance() {
 }
 
 void EventManager::ResetInstance() {
-	singleton = nullptr;
+	singleton = std::unique_ptr<EventManager>(new EventManager());
 }
 
 void EventManager::get(std::vector<APIEvent>& eventOutput, size_t max, EventFilter filter) {
-	std::lock_guard<std::mutex> lk(mutex);
+	std::unique_lock<std::shared_mutex> lk(mutex);
 
 	if(max == 0) // A limit of 0 indicates no limit
 		max = (size_t)-1;
@@ -27,34 +27,34 @@ void EventManager::get(std::vector<APIEvent>& eventOutput, size_t max, EventFilter filter) {
 	while(it != events.end()) {
 		if(filter.match(*it)) {
 			eventOutput.push_back(*it);
-			events.erase(it++);
+			it = events.erase(it);
 			if(count++ >= max)
 				break; // We now have as many written to output as we can
 		} else {
-			std::advance(it, 1);
+			it++;
 		}
 	}
 }
 
 /**
  * Removes the returned error from the map
- * If no error was found, return a default-constructed event
+ * If no error was found, return a NoErrorFound Info event
  */
 APIEvent EventManager::getLastError() {
-	std::lock_guard<std::mutex> lk(mutex);
+	std::unique_lock<std::shared_mutex> lk(mutex);
 
 	auto it = lastUserErrors.find(std::this_thread::get_id());
 	if(it == lastUserErrors.end()) {
 		return APIEvent(APIEvent::Type::NoErrorFound, APIEvent::Severity::EventInfo);
 	} else {
 		APIEvent ret = it->second;
-		lastUserErrors.erase(it);
+		it = lastUserErrors.erase(it);
 		return ret;
 	}
 }
 
 void EventManager::discard(EventFilter filter) {
-	std::lock_guard<std::mutex> lk(mutex);
+	std::unique_lock<std::shared_mutex> lk(mutex);
 	events.remove_if([&filter](const APIEvent& event) {
 		return filter.match(event);
 	});
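The loop changes above are all the same idiom: it = events.erase(it) replaces events.erase(it++), and std::advance(it, 1) becomes it++. For node-based containers such as std::list and std::map, erase(it++) happens to be safe, but assigning the iterator returned by erase() is the form that stays valid for every standard container, which is presumably why the commit standardizes on it. A small generic sketch of the idiom:

#include <list>
#include <vector>

// Illustrative sketch of the erase-while-iterating idiom used above.
template<typename Container, typename Pred>
void eraseIf(Container& c, Pred pred) {
	for(auto it = c.begin(); it != c.end(); /* no increment here */) {
		if(pred(*it))
			it = c.erase(it); // erase() returns the iterator following the removed element
		else
			++it;
	}
}
// Works the same way for std::list, std::vector, std::map, ...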
@@ -87,7 +87,7 @@ bool EventManager::enforceLimit() {
 	return true;
 }
 
-APIEvent::Severity EventManager::lowestCurrentSeverity() {
+APIEvent::Severity EventManager::lowestCurrentSeverity() const {
 	if(events.empty())
 		return APIEvent::Severity(0);
 
@@ -110,7 +110,7 @@ void EventManager::discardLeastSevere(size_t count) {
 	auto it = events.begin();
 	while(it != events.end()) {
 		if(infoFilter.match(*it)) {
-			events.erase(it++);
+			it = events.erase(it);
 			if(--count == 0)
 				break;
 		} else {
@@ -124,7 +124,7 @@ void EventManager::discardLeastSevere(size_t count) {
 	it = events.begin();
 	while(it != events.end()) {
 		if(warningFilter.match(*it)) {
-			events.erase(it++);
+			it = events.erase(it);
 			if(--count == 0)
 				break;
 		} else {
@@ -4,8 +4,10 @@
 #include <vector>
 #include <list>
 #include <mutex>
+#include <shared_mutex>
 #include <functional>
 #include <unordered_map>
+#include <map>
 #include <thread>
 #include "icsneo/api/event.h"
 
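One build note, not visible in the diff itself: std::shared_mutex is a C++17 facility (C++14 only provides std::shared_timed_mutex), so pulling in <shared_mutex> above assumes the project is compiled as C++17 or later, or that the timed variant would be substituted. A purely illustrative guard:

#include <shared_mutex>

// Build-time guard sketch: std::shared_mutex requires C++17.
// (MSVC reports an old __cplusplus unless /Zc:__cplusplus is set,
// so this check is illustrative rather than something to drop in as-is.)
#if defined(__cplusplus) && __cplusplus < 201703L
#  error "std::shared_mutex requires C++17; consider std::shared_timed_mutex on C++14"
#endif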
@@ -20,7 +22,7 @@ public:
 	static void ResetInstance();
 
 	size_t eventCount(EventFilter filter = EventFilter()) const {
-		std::lock_guard<std::mutex> lk(mutex);
+		std::shared_lock<std::shared_mutex> lk(mutex);
 		return count_internal(filter);
 	};
 
@@ -36,11 +38,11 @@ public:
 	APIEvent getLastError();
 
 	void add(APIEvent event) {
-		std::lock_guard<std::mutex> lk(mutex);
+		std::unique_lock<std::shared_mutex> lk(mutex);
 		add_internal(event);
 	}
 	void add(APIEvent::Type type, APIEvent::Severity severity, const Device* forDevice = nullptr) {
-		std::lock_guard<std::mutex> lk(mutex);
+		std::unique_lock<std::shared_mutex> lk(mutex);
 		add_internal(APIEvent(type, severity, forDevice));
 	}
 
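With both add() overloads and eventCount() now locking the same shared_mutex, producers on several threads and concurrent readers should coexist safely. A hedged usage sketch; the header path, the icsneo namespace, and the choice of enum values are assumptions based on what is visible in this diff:

#include <thread>
// Assumed header path for the class shown in this diff.
#include "icsneo/api/eventmanager.h"

int main() {
	using namespace icsneo; // assumed namespace

	auto producer = [] {
		for(int i = 0; i < 1000; i++)
			EventManager::GetInstance().add(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning);
	};

	std::thread a(producer), b(producer);

	// Readers only need the shared lock, so polling the count can overlap with other readers.
	while(EventManager::GetInstance().eventCount() < 100)
		std::this_thread::yield();

	a.join();
	b.join();
}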
@@ -55,24 +57,28 @@ public:
 			return;
 		}
 
-		std::lock_guard<std::mutex> lk(mutex);
+		std::unique_lock<std::shared_mutex> lk(mutex);
 		eventLimit = newLimit;
 		if(enforceLimit())
 			add_internal(APIEvent(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning));
 	}
 
-	size_t getEventLimit() const { return eventLimit; }
+	size_t getEventLimit() const {
+		std::shared_lock<std::shared_mutex> lk(mutex);
+		return eventLimit;
+	}
 
 private:
 	EventManager() : mutex(), events(), lastUserErrors(), eventLimit(10000) {}
+	EventManager(const EventManager &other);
+	EventManager& operator=(const EventManager &other);
 
 	// Used by functions for threadsafety
-	mutable std::mutex mutex;
+	mutable std::shared_mutex mutex;
 
 	// Stores all events
 	std::list<APIEvent> events;
-	std::unordered_map<std::thread::id, APIEvent> lastUserErrors;
+	std::map<std::thread::id, APIEvent> lastUserErrors;
 	size_t eventLimit = 10000;
 
 	size_t count_internal(EventFilter filter = EventFilter()) const;
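getEventLimit() previously returned eventLimit with no lock at all, which is a data race (undefined behavior) once setEventLimit() can run on another thread, even though the member is just a size_t; taking a std::shared_lock, as the new version does, is the minimal fix. An alternative, sketched purely as a design note rather than anything this commit does, would be to make the limit atomic and skip the lock in the accessor:

#include <atomic>
#include <cstddef>

// Illustrative alternative: an atomic limit needs no lock for get/set.
class LimitHolder {
public:
	size_t getEventLimit() const { return eventLimit.load(std::memory_order_relaxed); }
	void setEventLimit(size_t newLimit) { eventLimit.store(newLimit, std::memory_order_relaxed); }
private:
	std::atomic<size_t> eventLimit{10000};
};

The trade-off is that an atomic limit is no longer updated under the same lock that protects the event list, so anything that must keep the two consistent (such as enforceLimit()) would still need the shared_mutex.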
@@ -89,9 +95,10 @@ private:
 	 * If the key id already exists in the map, replace the event of that pair with the new one
 	 */
 	void add_internal_error(APIEvent event) {
-		auto iter = lastUserErrors.find(std::this_thread::get_id());
+		std::thread::id id = std::this_thread::get_id();
+		std::map<std::thread::id, APIEvent>::iterator iter = lastUserErrors.find(id);
 		if(iter == lastUserErrors.end())
-			lastUserErrors.insert(std::make_pair(std::this_thread::get_id(), event));
+			lastUserErrors.insert({id, event});
 		else
 			iter->second = event;
 	}
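add_internal_error() now does an explicit find followed by insert-or-overwrite, keyed by the calling thread's id. The same insert-or-update behaviour can be written more compactly with std::map::insert_or_assign (C++17); a sketch with a stand-in value type, since APIEvent may not be default-constructible:

#include <map>
#include <thread>

// "Event" is a stand-in type for this sketch. A non-default-constructible
// mapped type is one reason to prefer insert_or_assign (or the explicit
// find/insert done in the diff) over operator[].
struct Event { int code = 0; };

void recordLastError(std::map<std::thread::id, Event>& lastErrors, const Event& event) {
	lastErrors.insert_or_assign(std::this_thread::get_id(), event); // C++17
}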
@@ -120,7 +127,7 @@ private:
 
 	bool enforceLimit(); // Returns whether the limit enforcement resulted in an overflow
 
-	APIEvent::Severity lowestCurrentSeverity();
+	APIEvent::Severity lowestCurrentSeverity() const;
 	void discardLeastSevere(size_t count = 1);
 };
 